diff --git a/go.mod b/go.mod
index af7a00be..aaf5420f 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,7 @@ require (
github.com/apparentlymart/go-cidr v1.0.1 // indirect
github.com/apparentlymart/go-textseg v1.0.0 // indirect
github.com/armon/go-radix v1.0.0 // indirect
- github.com/aws/aws-sdk-go v1.25.3 // indirect
+ github.com/aws/aws-sdk-go v1.34.0 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/bgentry/speakeasy v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
@@ -38,7 +38,7 @@ require (
github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 // indirect
github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 // indirect
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
- github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
+ github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.1 // indirect
github.com/mattn/go-colorable v0.1.1 // indirect
github.com/mattn/go-isatty v0.0.5 // indirect
@@ -51,7 +51,7 @@ require (
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/mitchellh/reflectwalk v1.0.1 // indirect
github.com/oklog/run v1.0.0 // indirect
- github.com/pkg/errors v0.8.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/posener/complete v1.2.1 // indirect
github.com/sirupsen/logrus v1.3.0 // indirect
github.com/spf13/afero v1.2.2 // indirect
@@ -61,7 +61,7 @@ require (
github.com/zclconf/go-cty-yaml v1.0.1 // indirect
go.opencensus.io v0.22.0 // indirect
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect
- golang.org/x/net v0.0.0-20191009170851-d66e71096ffb // indirect
+ golang.org/x/net v0.0.0-20200202094626-16171245cfb2 // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
golang.org/x/text v0.3.2 // indirect
diff --git a/go.sum b/go.sum
index bf18a24d..43ef9cc1 100644
--- a/go.sum
+++ b/go.sum
@@ -24,8 +24,9 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
-github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ=
github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.34.0 h1:brux2dRrlwCF5JhTL7MUT3WUwo9zfDHZZp3+g3Mvlmo=
+github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
@@ -37,6 +38,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@@ -106,8 +108,9 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -151,8 +154,9 @@ github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@@ -169,8 +173,9 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/ucloud/ucloud-sdk-go v0.21.19 h1:WqD38oLDYxWS3s92DBKR99X3bKktuveTdSigcBynDT4=
github.com/ucloud/ucloud-sdk-go v0.21.19/go.mod h1:dyLmFHmUfgb4RZKYQP9IArlvQ2pxzFthfhwxRzOEPIw=
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
@@ -213,8 +218,9 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko=
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -281,9 +287,12 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go
new file mode 100644
index 00000000..1c496742
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go
@@ -0,0 +1,93 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+ "errors"
+ "strings"
+)
+
+const (
+ arnDelimiter = ":"
+ arnSections = 6
+ arnPrefix = "arn:"
+
+ // zero-indexed
+ sectionPartition = 1
+ sectionService = 2
+ sectionRegion = 3
+ sectionAccountID = 4
+ sectionResource = 5
+
+ // errors
+ invalidPrefix = "arn: invalid prefix"
+ invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+ // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+ // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+ // (Beijing) region is "aws-cn".
+ Partition string
+
+ // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+ // namespaces, see
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+ Service string
+
+ // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+ // component might be omitted.
+ Region string
+
+ // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+ // ARNs for some resources don't require an account number, so this component might be omitted.
+ AccountID string
+
+ // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource —
+ // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the
+ // resource name itself. Some services allow paths for resource names, as described in
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+ Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+ if !strings.HasPrefix(arn, arnPrefix) {
+ return ARN{}, errors.New(invalidPrefix)
+ }
+ sections := strings.SplitN(arn, arnDelimiter, arnSections)
+ if len(sections) != arnSections {
+ return ARN{}, errors.New(invalidSections)
+ }
+ return ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountID: sections[sectionAccountID],
+ Resource: sections[sectionResource],
+ }, nil
+}
+
+// IsARN returns whether the given string is an ARN by looking for
+// whether the string starts with "arn:" and contains the correct number
+// of sections delimited by colons(:).
+func IsARN(arn string) bool {
+ return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+ return arnPrefix +
+ arn.Partition + arnDelimiter +
+ arn.Service + arnDelimiter +
+ arn.Region + arnDelimiter +
+ arn.AccountID + arnDelimiter +
+ arn.Resource
+}
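
The vendored arn package above parses the six colon-delimited ARN sections. A minimal usage sketch (the ARN value is a placeholder, not taken from this change):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Placeholder ARN used purely for illustration.
	raw := "arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint"

	if !arn.IsARN(raw) {
		log.Fatalf("%q is not an ARN", raw)
	}

	parsed, err := arn.Parse(raw)
	if err != nil {
		log.Fatal(err)
	}

	// Fields map to the colon-delimited sections after the "arn:" prefix.
	fmt.Println(parsed.Partition, parsed.Service, parsed.Region, parsed.AccountID, parsed.Resource)

	// String() reassembles the canonical form.
	fmt.Println(parsed.String() == raw) // true
}
```
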
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
index 285e54d6..a4eb6a7f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
- } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ } else if !caseSensitive && strings.EqualFold(name, c) {
return true
}
return false
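
This hunk replaces a double strings.ToLower comparison with strings.EqualFold, which folds case during the comparison instead of allocating lowered copies; for the ASCII field names matched here the result is the same. A standalone sketch of the equivalence:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	field, query := "BucketName", "bucketname"

	// Old form: allocates two lowered copies before comparing.
	lowered := strings.ToLower(field) == strings.ToLower(query)

	// New form: case-insensitive comparison without the extra allocations.
	folded := strings.EqualFold(field, query)

	fmt.Println(lowered, folded) // true true
}
```
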
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index c022407f..03334d69 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -12,6 +12,7 @@ import (
type Config struct {
Config *aws.Config
Handlers request.Handlers
+ PartitionID string
Endpoint string
SigningRegion string
SigningName string
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
index 0fda4251..9f6af19d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -16,11 +16,11 @@ import (
type DefaultRetryer struct {
// Num max Retries is the number of max retries that will be performed.
// By default, this is zero.
- NumMaxRetries int
+ NumMaxRetries int
// MinRetryDelay is the minimum retry delay after which retry will be performed.
// If not set, the value is 0ns.
- MinRetryDelay time.Duration
+ MinRetryDelay time.Duration
// MinThrottleRetryDelay is the minimum retry delay when throttled.
// If not set, the value is 0ns.
@@ -28,7 +28,7 @@ type DefaultRetryer struct {
// MaxRetryDelay is the maximum retry delay before which retry must be performed.
// If not set, the value is 0ns.
- MaxRetryDelay time.Duration
+ MaxRetryDelay time.Duration
// MaxThrottleDelay is the maximum retry delay when throttled.
// If not set, the value is 0ns.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
index 920e9fdd..0c48f72e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -5,6 +5,7 @@ type ClientInfo struct {
ServiceName string
ServiceID string
APIVersion string
+ PartitionID string
Endpoint string
SigningName string
SigningRegion string
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index fd1e240f..3b809e84 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -43,7 +43,7 @@ type Config struct {
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
- // to `""` to use the default generated endpoint.
+ // to `nil` or the value to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
@@ -138,7 +138,7 @@ type Config struct {
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
- // You should use this flag to disble 100-Continue if you experience issues
+ // You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
@@ -161,6 +161,17 @@ type Config struct {
// on GetObject API calls.
S3DisableContentMD5Validation *bool
+ // Set this to `true` to have the S3 service client use the region specified
+ // in the ARN, when an ARN is provided as an argument to a bucket parameter.
+ S3UseARNRegion *bool
+
+ // Set this to `true` to enable the SDK to unmarshal API response header maps to
+ // normalized lower case map keys.
+ //
+ // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case
+ // Metadata member's map keys. The value of the header in the map is unaffected.
+ LowerCaseHeaderMaps *bool
+
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This options is only
@@ -172,7 +183,7 @@ type Config struct {
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
- // .WithEC2MetadataDiableTimeoutOverride(true)))
+ // .WithEC2MetadataDisableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
@@ -183,7 +194,7 @@ type Config struct {
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
- // to make requets. It is not recommended to set this value on the session
+ // to make requests. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session. Even
// services which don't support dual stack endpoints.
//
@@ -227,6 +238,7 @@ type Config struct {
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
+ // To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
@@ -246,6 +258,12 @@ type Config struct {
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
+
+ // STSRegionalEndpoint will enable regional or legacy endpoint resolving
+ STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+ // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
+ S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
}
// NewConfig returns a new Config pointer that can be chained with builder
@@ -379,6 +397,13 @@ func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
}
+// WithS3UseARNRegion sets a config S3UseARNRegion value and
+// returns a Config pointer for chaining
+func (c *Config) WithS3UseARNRegion(enable bool) *Config {
+ c.S3UseARNRegion = &enable
+ return c
+}
+
// WithUseDualStack sets a config UseDualStack value returning a Config
// pointer for chaining.
func (c *Config) WithUseDualStack(enable bool) *Config {
@@ -420,6 +445,20 @@ func (c *Config) MergeIn(cfgs ...*Config) {
}
}
+// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
+// when resolving the endpoint for a service
+func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
+ c.STSRegionalEndpoint = sre
+ return c
+}
+
+// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag
+// when resolving the endpoint for a service
+func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
+ c.S3UsEast1RegionalEndpoint = sre
+ return c
+}
+
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
@@ -493,6 +532,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
}
+ if other.S3UseARNRegion != nil {
+ dst.S3UseARNRegion = other.S3UseARNRegion
+ }
+
if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
@@ -520,6 +563,14 @@ func mergeInConfig(dst *Config, other *Config) {
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
+
+ if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
+ dst.STSRegionalEndpoint = other.STSRegionalEndpoint
+ }
+
+ if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
+ dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
+ }
}
// Copy will return a shallow copy of the Config object. If any additional
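
The Config additions above (S3UseARNRegion and the regional-endpoint selectors, plus their With* builders) are normally set when building a session. A hedged sketch; endpoints.RegionalSTSEndpoint is assumed from the SDK's endpoints package and is not part of this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Opt S3 clients into using the region embedded in access point ARNs,
	// and ask STS to resolve regional rather than legacy global endpoints.
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithS3UseARNRegion(true).
		WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)

	sess, err := session.NewSession(cfg)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(aws.BoolValue(sess.Config.S3UseARNRegion)) // true
}
```
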
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
index 66c5945d..2f944633 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -2,42 +2,8 @@
package aws
-import "time"
-
-// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
-// provide a 1.6 and 1.5 safe version of context that is compatible with Go
-// 1.7's Context.
-//
-// An emptyCtx is never canceled, has no values, and has no deadline. It is not
-// struct{}, since vars of this type must have distinct addresses.
-type emptyCtx int
-
-func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
- return
-}
-
-func (*emptyCtx) Done() <-chan struct{} {
- return nil
-}
-
-func (*emptyCtx) Err() error {
- return nil
-}
-
-func (*emptyCtx) Value(key interface{}) interface{} {
- return nil
-}
-
-func (e *emptyCtx) String() string {
- switch e {
- case backgroundCtx:
- return "aws.BackgroundContext"
- }
- return "unknown empty Context"
-}
-
-var (
- backgroundCtx = new(emptyCtx)
+import (
+ "github.com/aws/aws-sdk-go/internal/context"
)
// BackgroundContext returns a context that will never be canceled, has no
@@ -52,5 +18,5 @@ var (
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
- return backgroundCtx
+ return context.BackgroundCtx
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index 0c60e612..d95a5eb5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -161,7 +161,7 @@ func handleSendError(r *request.Request, err error) {
}
// Catch all request errors, and let the default retrier determine
// if the error is retryable.
- r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err)
// Override the error with a context canceled error, if that was canceled.
ctx := r.Context()
@@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
+ // Was any endpoint provided by the user, or one was derived by the
+ // SDK's endpoint resolver?
r.Error = aws.ErrMissingEndpoint
}
}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
new file mode 100644
index 00000000..5852b264
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -0,0 +1,22 @@
+// +build !go1.7
+
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/internal/context"
+)
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+ return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 00000000..388b2154
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
new file mode 100644
index 00000000..8152a864
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
@@ -0,0 +1,39 @@
+// +build !go1.9
+
+package credentials
+
+import "time"
+
+// Context is an copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as a SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
new file mode 100644
index 00000000..4356edb3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
@@ -0,0 +1,13 @@
+// +build go1.9
+
+package credentials
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index 4af59215..9f8fd92a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -50,10 +50,11 @@ package credentials
import (
"fmt"
- "sync"
+ "sync/atomic"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/sync/singleflight"
)
// AnonymousCredentials is an empty Credential object that can be used as
@@ -106,6 +107,13 @@ type Provider interface {
IsExpired() bool
}
+// ProviderWithContext is a Provider that can retrieve credentials with a Context
+type ProviderWithContext interface {
+ Provider
+
+ RetrieveWithContext(Context) (Value, error)
+}
+
// An Expirer is an interface that Providers can implement to expose the expiration
// time, if known. If the Provider cannot accurately provide this info,
// it should not implement this interface.
@@ -197,24 +205,24 @@ func (e *Expiry) ExpiresAt() time.Time {
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
- creds Value
- forceRefresh bool
-
- m sync.RWMutex
+ creds atomic.Value
+ sf singleflight.Group
provider Provider
}
// NewCredentials returns a pointer to a new Credentials with the provider set.
func NewCredentials(provider Provider) *Credentials {
- return &Credentials{
- provider: provider,
- forceRefresh: true,
+ c := &Credentials{
+ provider: provider,
}
+ c.creds.Store(Value{})
+ return c
}
-// Get returns the credentials value, or error if the credentials Value failed
-// to be retrieved.
+// GetWithContext returns the credentials value, or error if the credentials
+// Value failed to be retrieved. Will return early if the passed in context is
+// canceled.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
@@ -222,31 +230,56 @@ func NewCredentials(provider Provider) *Credentials {
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
-func (c *Credentials) Get() (Value, error) {
- // Check the cached credentials first with just the read lock.
- c.m.RLock()
- if !c.isExpired() {
- creds := c.creds
- c.m.RUnlock()
- return creds, nil
+//
+// Passed in Context is equivalent to aws.Context, and context.Context.
+func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
+ if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
+ return curCreds.(Value), nil
}
- c.m.RUnlock()
-
- // Credentials are expired need to retrieve the credentials taking the full
- // lock.
- c.m.Lock()
- defer c.m.Unlock()
-
- if c.isExpired() {
- creds, err := c.provider.Retrieve()
- if err != nil {
- return Value{}, err
- }
- c.creds = creds
- c.forceRefresh = false
+
+ // Cannot pass context down to the actual retrieve, because the first
+ // context would cancel the whole group when there is not direct
+ // association of items in the group.
+ resCh := c.sf.DoChan("", func() (interface{}, error) {
+ return c.singleRetrieve(&suppressedContext{ctx})
+ })
+ select {
+ case res := <-resCh:
+ return res.Val.(Value), res.Err
+ case <-ctx.Done():
+ return Value{}, awserr.New("RequestCanceled",
+ "request context canceled", ctx.Err())
}
+}
- return c.creds, nil
+func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) {
+ if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
+ return curCreds.(Value), nil
+ }
+
+ if p, ok := c.provider.(ProviderWithContext); ok {
+ creds, err = p.RetrieveWithContext(ctx)
+ } else {
+ creds, err = c.provider.Retrieve()
+ }
+ if err == nil {
+ c.creds.Store(creds)
+ }
+
+ return creds, err
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ return c.GetWithContext(backgroundContext())
}
// Expire expires the credentials and forces them to be retrieved on the
@@ -255,10 +288,7 @@ func (c *Credentials) Get() (Value, error) {
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.forceRefresh = true
+ c.creds.Store(Value{})
}
// IsExpired returns if the credentials are no longer valid, and need
@@ -267,33 +297,43 @@ func (c *Credentials) Expire() {
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
- c.m.RLock()
- defer c.m.RUnlock()
-
- return c.isExpired()
+ return c.isExpired(c.creds.Load())
}
// isExpired helper method wrapping the definition of expired credentials.
-func (c *Credentials) isExpired() bool {
- return c.forceRefresh || c.provider.IsExpired()
+func (c *Credentials) isExpired(creds interface{}) bool {
+ return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
}
// ExpiresAt provides access to the functionality of the Expirer interface of
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
- c.m.RLock()
- defer c.m.RUnlock()
-
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
- fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
+ fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName),
nil)
}
- if c.forceRefresh {
+ if c.creds.Load().(Value) == (Value{}) {
// set expiration time to the distant past
return time.Time{}, nil
}
return expirer.ExpiresAt(), nil
}
+
+type suppressedContext struct {
+ Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+ return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+ return nil
+}
+
+func (s *suppressedContext) Err() error {
+ return nil
+}
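
The credentials rewrite above swaps the RWMutex cache for an atomic.Value plus a singleflight group, so concurrent callers share one refresh, and GetWithContext lets that refresh be canceled. A minimal sketch using static credentials and an illustrative timeout:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Static provider: the empty token is fine for long-term access keys.
	creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretExample", "")

	// Bound the (potentially network-backed) refresh with a context.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	v, err := creds.GetWithContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.ProviderName, creds.IsExpired())
}
```
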
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
index 43d4ed38..92af5b72 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -87,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
- credsList, err := requestCredList(m.Client)
+ return m.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+ credsList, err := requestCredList(ctx, m.Client)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
@@ -97,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
}
credsName := credsList[0]
- roleCreds, err := requestCred(m.Client, credsName)
+ roleCreds, err := requestCred(ctx, m.Client, credsName)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
@@ -130,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
-func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
- resp, err := client.GetMetadata(iamSecurityCredsPath)
+func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
if err != nil {
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
}
@@ -154,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
//
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
-func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
- resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
index 1a7af53a..785f30d8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -116,7 +116,13 @@ func (p *Provider) IsExpired() bool {
// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. An error will be returned if the retrieval fails.
func (p *Provider) Retrieve() (credentials.Value, error) {
- resp, err := p.getCredentials()
+ return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+ resp, err := p.getCredentials(ctx)
if err != nil {
return credentials.Value{ProviderName: ProviderName},
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
@@ -148,7 +154,7 @@ type errorOutput struct {
Message string `json:"message"`
}
-func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) {
op := &request.Operation{
Name: "GetCredentials",
HTTPMethod: "GET",
@@ -156,6 +162,7 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
+ req.SetContext(ctx)
req.HTTPRequest.Header.Set("Accept", "application/json")
if authToken := p.AuthorizationToken; len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
index 1980c8c1..e6248360 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -90,6 +90,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
)
const (
@@ -142,7 +143,7 @@ const (
// DefaultBufSize limits buffer size from growing to an enormous
// amount due to a faulty process.
- DefaultBufSize = 1024
+ DefaultBufSize = int(8 * sdkio.KibiByte)
// DefaultTimeout default limit on time a process can run.
DefaultTimeout = time.Duration(1) * time.Minute
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
index e1551495..22b5c5d9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -17,8 +17,9 @@ var (
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
-// A SharedCredentialsProvider retrieves credentials from the current user's home
-// directory, and keeps track if those credentials are expired.
+// A SharedCredentialsProvider retrieves access key pair (access key ID,
+// secret access key, and session token if present) credentials from the current
+// user's home directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
index 531139e3..cbba1e3d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -19,7 +19,9 @@ type StaticProvider struct {
}
// NewStaticCredentials returns a pointer to a new Credentials object
-// wrapping a static credentials value provider.
+// wrapping a static credentials value provider. Token is only required
+// for temporary security credentials retrieved via STS, otherwise an empty
+// string can be passed for this parameter.
func NewStaticCredentials(id, secret, token string) *Credentials {
return NewCredentials(&StaticProvider{Value: Value{
AccessKeyID: id,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 2e528d13..6846ef6f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -87,6 +87,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
"github.com/aws/aws-sdk-go/service/sts"
)
@@ -118,6 +119,10 @@ type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
+type assumeRolerWithContext interface {
+ AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+}
+
// DefaultDuration is the default amount of time in minutes that the credentials
// will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute
@@ -144,6 +149,13 @@ type AssumeRoleProvider struct {
// Session name, if you wish to reuse the credentials elsewhere.
RoleSessionName string
+ // Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
+ Tags []*sts.Tag
+
+ // A list of keys for session tags that you want to set as transitive.
+ // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain.
+ TransitiveTagKeys []*string
+
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
Duration time.Duration
@@ -157,6 +169,29 @@ type AssumeRoleProvider struct {
// size.
Policy *string
+ // The ARNs of IAM managed policies you want to use as managed session policies.
+ // The policies must exist in the same account as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies can't exceed 2,048 characters.
+ //
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*sts.PolicyDescriptorType
+
// The identification number of the MFA device that is associated with the user
// who is making the AssumeRole call. Specify this value if the trust policy
// of the role being assumed includes a condition that requires MFA authentication.
@@ -258,6 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+ return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
@@ -269,10 +309,13 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
}
jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
input := &sts.AssumeRoleInput{
- DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
- RoleArn: aws.String(p.RoleARN),
- RoleSessionName: aws.String(p.RoleSessionName),
- ExternalId: p.ExternalID,
+ DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ Tags: p.Tags,
+ PolicyArns: p.PolicyArns,
+ TransitiveTagKeys: p.TransitiveTagKeys,
}
if p.Policy != nil {
input.Policy = p.Policy
@@ -295,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
}
}
- roleOutput, err := p.Client.AssumeRole(input)
+ var roleOutput *sts.AssumeRoleOutput
+ var err error
+
+ if c, ok := p.Client.(assumeRolerWithContext); ok {
+ roleOutput, err = c.AssumeRoleWithContext(ctx, input)
+ } else {
+ roleOutput, err = p.Client.AssumeRole(input)
+ }
+
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
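
The new Tags, TransitiveTagKeys, and PolicyArns fields let AssumeRole carry session tags and managed session policies. A hedged sketch; the role ARN, tag values, and policy ARN are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role",
		func(p *stscreds.AssumeRoleProvider) {
			// Session tags; marking a key transitive propagates it through role chaining.
			p.Tags = []*sts.Tag{
				{Key: aws.String("team"), Value: aws.String("platform")},
			}
			p.TransitiveTagKeys = []*string{aws.String("team")}

			// Scope the session down with a managed session policy.
			p.PolicyArns = []*sts.PolicyDescriptorType{
				{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
			}
		})

	fmt.Println(creds.IsExpired()) // true until the first Get/GetWithContext
}
```
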
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
index b20b6339..6feb262b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -28,15 +28,34 @@ const (
// compare test values.
var now = time.Now
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+ FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+ data, err := ioutil.ReadFile(string(f))
+ if err != nil {
+ errMsg := fmt.Sprintf("unable to read file at %s", f)
+ return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+ }
+ return data, nil
+}
+
// WebIdentityRoleProvider is used to retrieve credentials using
// an OIDC token.
type WebIdentityRoleProvider struct {
credentials.Expiry
+ PolicyArns []*sts.PolicyDescriptorType
client stsiface.STSAPI
ExpiryWindow time.Duration
- tokenFilePath string
+ tokenFetcher TokenFetcher
roleARN string
roleSessionName string
}
@@ -52,9 +71,15 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName
// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
// provided stsiface.STSAPI
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+ return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path))
+}
+
+// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI and a TokenFetcher
+func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
return &WebIdentityRoleProvider{
client: svc,
- tokenFilePath: path,
+ tokenFetcher: tokenFetcher,
roleARN: roleARN,
roleSessionName: roleSessionName,
}
@@ -64,10 +89,16 @@ func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, p
// 'WebIdentityTokenFilePath' specified destination and if that is empty an
// error will be returned.
func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
- b, err := ioutil.ReadFile(p.tokenFilePath)
+ return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to assume a role from a token which is located at
+// 'WebIdentityTokenFilePath' specified destination and if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+ b, err := p.tokenFetcher.FetchToken(ctx)
if err != nil {
- errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
- return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
+ return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
}
sessionName := p.roleSessionName
@@ -77,10 +108,14 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+ PolicyArns: p.PolicyArns,
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
})
+
+ req.SetContext(ctx)
+
// InvalidIdentityToken error is a temporary error that can occur
// when assuming an Role with a JWT web identity token.
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
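
TokenFetcher above decouples the provider from the token file: FetchTokenPath preserves the old read-from-disk behaviour, while NewWebIdentityRoleProviderWithToken accepts any fetcher. A sketch with a hypothetical in-memory fetcher (role ARN and token are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// staticToken is a hypothetical TokenFetcher serving a token already held in memory.
type staticToken []byte

func (t staticToken) FetchToken(_ credentials.Context) ([]byte, error) {
	return []byte(t), nil
}

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

	p := stscreds.NewWebIdentityRoleProviderWithToken(
		sts.New(sess),
		"arn:aws:iam::123456789012:role/example-oidc-role", // placeholder role
		"example-session",
		staticToken("oidc-token-from-somewhere"),
	)

	creds := credentials.NewCredentials(p)
	fmt.Println(creds.IsExpired()) // true until the first retrieval
}
```
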
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
index c7008d8c..835bcd49 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -66,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
XAmzRequestID: aws.String(r.RequestID),
- AttemptCount: aws.Int(r.RetryCount + 1),
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
AccessKey: aws.String(creds.AccessKeyID),
}
@@ -90,7 +89,7 @@ func getMetricException(err awserr.Error) metricException {
code := err.Code()
switch code {
- case "RequestError",
+ case request.ErrCodeRequestError,
request.ErrCodeSerialization,
request.CanceledErrorCode:
return sdkException{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index d126764c..a716c021 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -4,28 +4,73 @@ import (
"encoding/json"
"fmt"
"net/http"
+ "strconv"
"strings"
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
+// getToken uses the duration to return a token for EC2 metadata service,
+// or an error if the request failed.
+func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) {
+ op := &request.Operation{
+ Name: "GetToken",
+ HTTPMethod: "PUT",
+ HTTPPath: "/api/token",
+ }
+
+ var output tokenOutput
+ req := c.NewRequest(op, nil, &output)
+ req.SetContext(ctx)
+
+ // remove the fetch token handler from the request handlers to avoid infinite recursion
+ req.Handlers.Sign.RemoveByName(fetchTokenHandlerName)
+
+ // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request.
+ req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler)
+
+ ttl := strconv.FormatInt(int64(duration/time.Second), 10)
+ req.HTTPRequest.Header.Set(ttlHeader, ttl)
+
+ err := req.Send()
+
+ // Errors with bad request status should be returned.
+ if err != nil {
+ err = awserr.NewRequestFailure(
+ awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err),
+ req.HTTPResponse.StatusCode, req.RequestID)
+ }
+
+ return output, err
+}
+
// GetMetadata uses the path provided to request information from the EC2
-// instance metdata service. The content will be returned as a string, or
+// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ return c.GetMetadataWithContext(aws.BackgroundContext(), p)
+}
+
+// GetMetadataWithContext uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: sdkuri.PathJoin("/meta-data", p),
}
-
output := &metadataOutput{}
+
req := c.NewRequest(op, nil, output)
- err := req.Send()
+ req.SetContext(ctx)
+
+ err := req.Send()
return output.Content, err
}
@@ -33,6 +78,13 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
// there is no user-data setup for the EC2 instance a "NotFoundError" error
// code will be returned.
func (c *EC2Metadata) GetUserData() (string, error) {
+ return c.GetUserDataWithContext(aws.BackgroundContext())
+}
+
+// GetUserDataWithContext returns the userdata that was configured for the service. If
+// there is no user-data setup for the EC2 instance a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
@@ -41,13 +93,9 @@ func (c *EC2Metadata) GetUserData() (string, error) {
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
- req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
- if r.HTTPResponse.StatusCode == http.StatusNotFound {
- r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
- }
- })
- err := req.Send()
+ req.SetContext(ctx)
+ err := req.Send()
return output.Content, err
}
@@ -55,6 +103,13 @@ func (c *EC2Metadata) GetUserData() (string, error) {
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ return c.GetDynamicDataWithContext(aws.BackgroundContext(), p)
+}
+
+// GetDynamicDataWithContext uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
@@ -63,8 +118,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
- err := req.Send()
+ req.SetContext(ctx)
+ err := req.Send()
return output.Content, err
}
@@ -72,7 +128,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
- resp, err := c.GetDynamicData("instance-identity/document")
+ return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext())
+}
+
+// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document")
if err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("EC2MetadataRequestError",
@@ -91,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument
// IAMInfo retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
- resp, err := c.GetMetadata("iam/info")
+ return c.IAMInfoWithContext(aws.BackgroundContext())
+}
+
+// IAMInfoWithContext retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) {
+ resp, err := c.GetMetadataWithContext(ctx, "iam/info")
if err != nil {
return EC2IAMInfo{},
awserr.New("EC2MetadataRequestError",
@@ -116,24 +184,36 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
- resp, err := c.GetMetadata("placement/availability-zone")
+ return c.RegionWithContext(aws.BackgroundContext())
+}
+
+// RegionWithContext returns the region the instance is running in.
+func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) {
+ ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx)
if err != nil {
return "", err
}
-
- if len(resp) == 0 {
- return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
+ // extract region from the ec2InstanceIdentityDocument
+ region := ec2InstanceIdentityDocument.Region
+ if len(region) == 0 {
+ return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil)
}
-
- // returns region without the suffix. Eg: us-west-2a becomes us-west-2
- return resp[:len(resp)-1], nil
+ // returns region
+ return region, nil
}
// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) Available() bool {
- if _, err := c.GetMetadata("instance-id"); err != nil {
+ return c.AvailableWithContext(aws.BackgroundContext())
+}
+
+// AvailableWithContext returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool {
+ if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil {
return false
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index 4c5636e3..dc7e051e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -13,6 +13,7 @@ import (
"io"
"net/http"
"os"
+ "strconv"
"strings"
"time"
@@ -24,9 +25,25 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
)
-// ServiceName is the name of the service.
-const ServiceName = "ec2metadata"
-const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+const (
+ // ServiceName is the name of the service.
+ ServiceName = "ec2metadata"
+ disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+ // Headers for Token and TTL
+ ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds"
+ tokenHeader = "x-aws-ec2-metadata-token"
+
+ // Named Handler constants
+ fetchTokenHandlerName = "FetchTokenHandler"
+ unmarshalMetadataHandlerName = "unmarshalMetadataHandler"
+ unmarshalTokenHandlerName = "unmarshalTokenHandler"
+ enableTokenProviderHandlerName = "enableTokenProviderHandler"
+
+ // TTL constants
+ defaultTTL = 21600 * time.Second
+ ttlExpirationWindow = 30 * time.Second
+)
// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
@@ -63,8 +80,10 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
// use a shorter timeout than default because the metadata
// service is local if it is running, and to fail faster
// if not running on an ec2 instance.
- Timeout: 5 * time.Second,
+ Timeout: 1 * time.Second,
}
+ // max number of retries on the client operation
+ cfg.MaxRetries = aws.Int(2)
}
svc := &EC2Metadata{
@@ -80,13 +99,27 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
),
}
- svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ // token provider instance
+ tp := newTokenProvider(svc, defaultTTL)
+
+ // NamedHandler for fetching token
+ svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
+ Name: fetchTokenHandlerName,
+ Fn: tp.fetchTokenHandler,
+ })
+ // NamedHandler for enabling token provider
+ svc.Handlers.Complete.PushBackNamed(request.NamedHandler{
+ Name: enableTokenProviderHandlerName,
+ Fn: tp.enableTokenProviderHandler,
+ })
+
+ svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)
// Disable the EC2 Metadata service if the environment variable is set.
- // This shortcirctes the service's functionality to always fail to send
+ // This short-circuits the service's functionality to always fail to send
// requests.
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
svc.Handlers.Send.SwapNamed(request.NamedHandler{
@@ -107,7 +140,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
for _, option := range opts {
option(svc.Client)
}
-
return svc
}
@@ -119,30 +151,74 @@ type metadataOutput struct {
Content string
}
-func unmarshalHandler(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- b := &bytes.Buffer{}
- if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
- return
- }
+type tokenOutput struct {
+ Token string
+ TTL time.Duration
+}
- if data, ok := r.Data.(*metadataOutput); ok {
- data.Content = b.String()
- }
+// unmarshal token handler is used to parse the response of a getToken operation
+var unmarshalTokenHandler = request.NamedHandler{
+ Name: unmarshalTokenHandlerName,
+ Fn: func(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ var b bytes.Buffer
+ if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
+ "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
+ return
+ }
+
+ v := r.HTTPResponse.Header.Get(ttlHeader)
+ data, ok := r.Data.(*tokenOutput)
+ if !ok {
+ return
+ }
+
+ data.Token = b.String()
+ // TTL is in seconds
+ i, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode,
+ "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID)
+ return
+ }
+ t := time.Duration(i) * time.Second
+ data.TTL = t
+ },
+}
+
+var unmarshalHandler = request.NamedHandler{
+ Name: unmarshalMetadataHandlerName,
+ Fn: func(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ var b bytes.Buffer
+ if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
+ "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+ },
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
- b := &bytes.Buffer{}
- if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
+ var b bytes.Buffer
+
+ if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err),
+ r.HTTPResponse.StatusCode, r.RequestID)
return
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
- r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+ r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())),
+ r.HTTPResponse.StatusCode, r.RequestID)
}
func validateEndpointHandler(r *request.Request) {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
new file mode 100644
index 00000000..d0a3a020
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -0,0 +1,92 @@
+package ec2metadata
+
+import (
+ "net/http"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A tokenProvider struct provides access to EC2Metadata client
+// and atomic instance of a token, along with configuredTTL for it.
+// tokenProvider also provides an atomic flag to disable the
+// fetch token operation.
+// The disabled member will use 0 as false, and 1 as true.
+type tokenProvider struct {
+ client *EC2Metadata
+ token atomic.Value
+ configuredTTL time.Duration
+ disabled uint32
+}
+
+// A ec2Token struct helps use of token in EC2 Metadata service ops
+type ec2Token struct {
+ token string
+ credentials.Expiry
+}
+
+// newTokenProvider provides a pointer to a tokenProvider instance
+func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
+ return &tokenProvider{client: c, configuredTTL: duration}
+}
+
+// fetchTokenHandler fetches token for EC2Metadata service client by default.
+func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
+
+ // short-circuits to insecure data flow if tokenProvider is disabled.
+ if v := atomic.LoadUint32(&t.disabled); v == 1 {
+ return
+ }
+
+ if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() {
+ r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
+ return
+ }
+
+ output, err := t.client.getToken(r.Context(), t.configuredTTL)
+
+ if err != nil {
+
+ // change the disabled flag on token provider to true,
+ // when error is request timeout error.
+ if requestFailureError, ok := err.(awserr.RequestFailure); ok {
+ switch requestFailureError.StatusCode() {
+ case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
+ atomic.StoreUint32(&t.disabled, 1)
+ case http.StatusBadRequest:
+ r.Error = requestFailureError
+ }
+
+ // Check if request timed out while waiting for response
+ if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
+ if e.Code() == request.ErrCodeRequestError {
+ atomic.StoreUint32(&t.disabled, 1)
+ }
+ }
+ }
+ return
+ }
+
+ newToken := ec2Token{
+ token: output.Token,
+ }
+ newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow)
+ t.token.Store(newToken)
+
+ // Inject token header to the request.
+ if ec2Token, ok := t.token.Load().(ec2Token); ok {
+ r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
+ }
+}
+
+// enableTokenProviderHandler enables the token provider
+func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
+ // If the error code status is 401, we enable the token provider
+ if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
+ e.StatusCode() == http.StatusUnauthorized {
+ atomic.StoreUint32(&t.disabled, 0)
+ }
+}
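
For reference, a hedged sketch (separate from the patch) of the raw IMDSv2 exchange that the token provider above automates: a PUT to the token endpoint carrying the TTL header from service.go, followed by a GET that presents the returned token. The 169.254.169.254 address is the standard instance metadata endpoint, and 21600 matches defaultTTL.

	package main

	import (
		"fmt"
		"io"
		"net/http"
		"strings"
		"time"
	)

	func main() {
		client := &http.Client{Timeout: time.Second}

		// 1. Fetch a session token with a TTL (x-aws-ec2-metadata-token-ttl-seconds).
		tokReq, _ := http.NewRequest("PUT", "http://169.254.169.254/latest/api/token", nil)
		tokReq.Header.Set("x-aws-ec2-metadata-token-ttl-seconds", "21600")
		tokResp, err := client.Do(tokReq)
		if err != nil {
			fmt.Println("token fetch failed:", err)
			return
		}
		defer tokResp.Body.Close()
		token, _ := io.ReadAll(tokResp.Body)

		// 2. Present the token on the metadata request (x-aws-ec2-metadata-token).
		metaReq, _ := http.NewRequest("GET", "http://169.254.169.254/latest/meta-data/instance-id", nil)
		metaReq.Header.Set("x-aws-ec2-metadata-token", strings.TrimSpace(string(token)))
		metaResp, err := client.Do(metaReq)
		if err != nil {
			fmt.Println("metadata request failed:", err)
			return
		}
		defer metaResp.Body.Close()
		id, _ := io.ReadAll(metaResp.Body)
		fmt.Println("instance-id:", string(id))
	}
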
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
index 87b9ff3f..654fb1ad 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
p := &ps[i]
custAddEC2Metadata(p)
custAddS3DualStack(p)
+ custRegionalS3(p)
custRmIotDataService(p)
custFixAppAutoscalingChina(p)
custFixAppAutoscalingUsGov(p)
@@ -92,7 +93,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
}
func custAddS3DualStack(p *partition) {
- if p.ID != "aws" {
+ if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") {
return
}
@@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) {
custAddDualstack(p, "s3-control")
}
+func custRegionalS3(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ service, ok := p.Services["s3"]
+ if !ok {
+ return
+ }
+
+ // If global endpoint already exists no customization needed.
+ if _, ok := service.Endpoints["aws-global"]; ok {
+ return
+ }
+
+ service.PartitionEndpoint = "aws-global"
+ service.Endpoints["us-east-1"] = endpoint{}
+ service.Endpoints["aws-global"] = endpoint{
+ Hostname: "s3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ }
+
+ p.Services["s3"] = service
+}
+
func custAddDualstack(p *partition, svcName string) {
s, ok := p.Services[svcName]
if !ok {
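
A small sketch (not part of the patch) of what custRegionalS3 above produces: after decoding, the aws partition's s3 service carries an aws-global partition endpoint backed by s3.amazonaws.com with a us-east-1 credential scope, which the standard resolver surfaces as follows (assuming the endpoints package API as vendored in this tree).

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// "aws-global" is the pseudo-region injected by custRegionalS3.
		resolved, err := endpoints.DefaultResolver().EndpointFor("s3", "aws-global")
		if err != nil {
			fmt.Println("resolve failed:", err)
			return
		}
		// Expected per the customization: https://s3.amazonaws.com signed for us-east-1.
		fmt.Println(resolved.URL, resolved.SigningRegion)
	}
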
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 452cefda..4f9de24f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -17,6 +17,7 @@ const (
// AWS Standard partition's regions.
const (
+ AfSouth1RegionID = "af-south-1" // Africa (Cape Town).
ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
@@ -24,11 +25,12 @@ const (
ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
CaCentral1RegionID = "ca-central-1" // Canada (Central).
- EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
- EuNorth1RegionID = "eu-north-1" // EU (Stockholm).
- EuWest1RegionID = "eu-west-1" // EU (Ireland).
- EuWest2RegionID = "eu-west-2" // EU (London).
- EuWest3RegionID = "eu-west-3" // EU (Paris).
+ EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
+ EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).
+ EuSouth1RegionID = "eu-south-1" // Europe (Milan).
+ EuWest1RegionID = "eu-west-1" // Europe (Ireland).
+ EuWest2RegionID = "eu-west-2" // Europe (London).
+ EuWest3RegionID = "eu-west-3" // Europe (Paris).
MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
UsEast1RegionID = "us-east-1" // US East (N. Virginia).
@@ -46,7 +48,7 @@ const (
// AWS GovCloud (US) partition's regions.
const (
UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
- UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West).
)
// AWS ISO (US) partition's regions.
@@ -97,7 +99,7 @@ var awsPartition = partition{
DNSSuffix: "amazonaws.com",
RegionRegex: regionRegex{
Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$")
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$")
return reg
}(),
},
@@ -107,6 +109,9 @@ var awsPartition = partition{
SignatureVersions: []string{"v4"},
},
Regions: regions{
+ "af-south-1": region{
+ Description: "Africa (Cape Town)",
+ },
"ap-east-1": region{
Description: "Asia Pacific (Hong Kong)",
},
@@ -129,19 +134,22 @@ var awsPartition = partition{
Description: "Canada (Central)",
},
"eu-central-1": region{
- Description: "EU (Frankfurt)",
+ Description: "Europe (Frankfurt)",
},
"eu-north-1": region{
- Description: "EU (Stockholm)",
+ Description: "Europe (Stockholm)",
+ },
+ "eu-south-1": region{
+ Description: "Europe (Milan)",
},
"eu-west-1": region{
- Description: "EU (Ireland)",
+ Description: "Europe (Ireland)",
},
"eu-west-2": region{
- Description: "EU (London)",
+ Description: "Europe (London)",
},
"eu-west-3": region{
- Description: "EU (Paris)",
+ Description: "Europe (Paris)",
},
"me-south-1": region{
Description: "Middle East (Bahrain)",
@@ -169,9 +177,10 @@ var awsPartition = partition{
"us-east-1": endpoint{},
},
},
- "acm": service{
+ "access-analyzer": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -181,15 +190,101 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "access-analyzer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "access-analyzer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "access-analyzer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "access-analyzer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "ca-central-1-fips": endpoint{
+ Hostname: "acm-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "acm-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "acm-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "acm-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "acm-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"acm-pca": service{
@@ -209,6 +304,61 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "acm-pca-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "acm-pca-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "acm-pca-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "acm-pca-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.detective": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -218,6 +368,12 @@ var awsPartition = partition{
"api.ecr": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{
+ Hostname: "api.ecr.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
"ap-east-1": endpoint{
Hostname: "api.ecr.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -272,6 +428,12 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
+ "eu-south-1": endpoint{
+ Hostname: "api.ecr.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
"eu-west-1": endpoint{
Hostname: "api.ecr.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -290,6 +452,30 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ "fips-us-east-1": endpoint{
+ Hostname: "ecr-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ecr-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ecr-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ecr-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
"me-south-1": endpoint{
Hostname: "api.ecr.me-south-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -328,6 +514,29 @@ var awsPartition = partition{
},
},
},
+ "api.elastic-inference": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com",
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com",
+ },
+ "eu-west-1": endpoint{
+ Hostname: "api.elastic-inference.eu-west-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{
+ Hostname: "api.elastic-inference.us-east-1.amazonaws.com",
+ },
+ "us-east-2": endpoint{
+ Hostname: "api.elastic-inference.us-east-2.amazonaws.com",
+ },
+ "us-west-2": endpoint{
+ Hostname: "api.elastic-inference.us-west-2.amazonaws.com",
+ },
+ },
+ },
"api.mediatailor": service{
Endpoints: endpoints{
@@ -366,6 +575,7 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-1-fips": endpoint{
@@ -400,6 +610,7 @@ var awsPartition = partition{
"apigateway": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -409,6 +620,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -422,13 +634,10 @@ var awsPartition = partition{
},
"application-autoscaling": service{
Defaults: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com",
Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "application-autoscaling",
- },
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -438,6 +647,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -452,6 +662,7 @@ var awsPartition = partition{
"appmesh": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -459,8 +670,12 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -477,12 +692,19 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips": endpoint{
+ Hostname: "appstream2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"appsync": service{
@@ -493,11 +715,17 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -515,8 +743,12 @@ var awsPartition = partition{
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -525,6 +757,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -534,6 +767,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -547,13 +781,10 @@ var awsPartition = partition{
},
"autoscaling-plans": service{
Defaults: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com",
Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "autoscaling-plans",
- },
},
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -561,8 +792,12 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -572,14 +807,20 @@ var awsPartition = partition{
"backup": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -598,15 +839,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "fips.batch.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "fips.batch.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "fips.batch.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "fips.batch.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"budgets": service{
@@ -655,12 +921,24 @@ var awsPartition = partition{
"cloud9": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -681,6 +959,7 @@ var awsPartition = partition{
"cloudformation": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -690,15 +969,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"cloudfront": service{
@@ -737,6 +1041,7 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -746,10 +1051,12 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -774,6 +1081,7 @@ var awsPartition = partition{
"cloudtrail": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -783,14 +1091,54 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "cloudtrail-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "cloudtrail-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codeartifact": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
- "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -844,6 +1192,7 @@ var awsPartition = partition{
"codecommit": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -861,16 +1210,18 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"codedeploy": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -880,6 +1231,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -929,24 +1281,76 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "codepipeline-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "codepipeline-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "codepipeline-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "codepipeline-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "codepipeline-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
- "codestar": service{
+ "codestar-connections": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -965,9 +1369,27 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "cognito-identity-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "cognito-identity-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "cognito-identity-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"cognito-idp": service{
@@ -982,9 +1404,27 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "cognito-idp-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "cognito-idp-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"cognito-sync": service{
@@ -1008,15 +1448,36 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "comprehend-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "comprehend-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "comprehend-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"comprehendmedical": service{
@@ -1026,9 +1487,27 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"config": service{
@@ -1058,8 +1537,10 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -1079,7 +1560,24 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dataexchange": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -1096,12 +1594,26 @@ var awsPartition = partition{
"datasync": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "datasync-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
"fips-us-east-1": endpoint{
Hostname: "datasync-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1127,6 +1639,7 @@ var awsPartition = partition{
},
},
"me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -1142,6 +1655,8 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -1158,6 +1673,7 @@ var awsPartition = partition{
"directconnect": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1167,26 +1683,56 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "directconnect-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "directconnect-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "directconnect-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "directconnect-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"discovery": service{
Endpoints: endpoints{
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"dms": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1194,17 +1740,24 @@ var awsPartition = partition{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
- "eu-central-1": endpoint{},
- "eu-north-1": endpoint{},
- "eu-west-1": endpoint{},
- "eu-west-2": endpoint{},
- "eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "dms-fips": endpoint{
+ Hostname: "dms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"docdb": service{
@@ -1222,12 +1775,30 @@ var awsPartition = partition{
Region: "ap-northeast-2",
},
},
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
"ap-southeast-2": endpoint{
Hostname: "rds.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-2",
},
},
+ "ca-central-1": endpoint{
+ Hostname: "rds.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
"eu-central-1": endpoint{
Hostname: "rds.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1246,6 +1817,12 @@ var awsPartition = partition{
Region: "eu-west-2",
},
},
+ "eu-west-3": endpoint{
+ Hostname: "rds.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
"us-east-1": endpoint{
Hostname: "rds.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1269,6 +1846,8 @@ var awsPartition = partition{
"ds": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -1277,13 +1856,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "ds-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "ds-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ds-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ds-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ds-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"dynamodb": service{
@@ -1291,6 +1903,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1306,6 +1919,7 @@ var awsPartition = partition{
},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1353,6 +1967,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1362,15 +1977,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "ec2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ec2-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ec2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ec2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"ec2metadata": service{
@@ -1387,6 +2033,7 @@ var awsPartition = partition{
"ecs": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1396,20 +2043,90 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "ecs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ecs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ecs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ecs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "eks": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "fips.eks.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "fips.eks.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "fips.eks.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"elasticache": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1419,6 +2136,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1439,6 +2157,7 @@ var awsPartition = partition{
"elasticbeanstalk": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1448,20 +2167,47 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"elasticfilesystem": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -1469,13 +2215,137 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-af-south-1": endpoint{
+ Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ "fips-ap-east-1": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "fips-ap-northeast-1": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "fips-ap-northeast-2": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "fips-ap-south-1": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "fips-ap-southeast-1": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "fips-ap-southeast-2": endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "fips-ca-central-1": endpoint{
+ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-eu-central-1": endpoint{
+ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "fips-eu-north-1": endpoint{
+ Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "fips-eu-south-1": endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ "fips-eu-west-1": endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "fips-eu-west-2": endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "fips-eu-west-3": endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "fips-me-south-1": endpoint{
+ Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ "fips-sa-east-1": endpoint{
+ Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"elasticloadbalancing": service{
@@ -1483,6 +2353,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1492,15 +2363,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"elasticmapreduce": service{
@@ -1509,6 +2405,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1520,9 +2417,40 @@ var awsPartition = partition{
SSLCommonName: "{service}.{region}.{dnsSuffix}",
},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
"me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{
@@ -1570,6 +2498,7 @@ var awsPartition = partition{
"es": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1579,6 +2508,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1599,6 +2529,7 @@ var awsPartition = partition{
"events": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1608,20 +2539,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "events-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "events-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "events-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "events-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"firehose": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1631,14 +2588,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "firehose-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "firehose-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "firehose-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "firehose-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"fms": service{
@@ -1646,29 +2629,161 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ap-northeast-1": endpoint{
+ Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "fips-ap-northeast-2": endpoint{
+ Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "fips-ap-south-1": endpoint{
+ Hostname: "fms-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "fips-ap-southeast-1": endpoint{
+ Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "fips-ap-southeast-2": endpoint{
+ Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "fips-ca-central-1": endpoint{
+ Hostname: "fms-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-eu-central-1": endpoint{
+ Hostname: "fms-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "fips-eu-west-1": endpoint{
+ Hostname: "fms-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "fips-eu-west-2": endpoint{
+ Hostname: "fms-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "fips-eu-west-3": endpoint{
+ Hostname: "fms-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "fips-sa-east-1": endpoint{
+ Hostname: "fms-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "fms-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "fms-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "fms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "fms-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "forecast": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "forecastquery": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
- "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"fsx": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -1699,6 +2814,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1708,15 +2824,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "glacier-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "glacier-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "glacier-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "glacier-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "glacier-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"glue": service{
@@ -1734,12 +2881,36 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "glue-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "glue-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "glue-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "glue-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"greengrass": service{
@@ -1764,8 +2935,24 @@ var awsPartition = partition{
"groundstation": service{
Endpoints: endpoints{
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "fips-us-east-2": endpoint{
+ Hostname: "groundstation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "groundstation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"guardduty": service{
@@ -1774,6 +2961,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -1783,6 +2971,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -1824,6 +3013,12 @@ var awsPartition = partition{
"us-east-1": endpoint{},
},
},
+ "honeycode": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
"iam": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -1835,6 +3030,12 @@ var awsPartition = partition{
Region: "us-east-1",
},
},
+ "iam-fips": endpoint{
+ Hostname: "iam-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
},
},
"importexport": service{
@@ -1863,10 +3064,34 @@ var awsPartition = partition{
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "inspector-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "inspector-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "inspector-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "inspector-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"iot": service{
@@ -1900,6 +3125,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
@@ -1911,9 +3137,12 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -1928,6 +3157,18 @@ var awsPartition = partition{
Region: "ap-northeast-1",
},
},
+ "ap-northeast-2": endpoint{
+ Hostname: "data.iotevents.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
"ap-southeast-2": endpoint{
Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
@@ -1946,6 +3187,12 @@ var awsPartition = partition{
Region: "eu-west-1",
},
},
+ "eu-west-2": endpoint{
+ Hostname: "data.iotevents.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
"us-east-1": endpoint{
Hostname: "data.iotevents.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -1966,6 +3213,29 @@ var awsPartition = partition{
},
},
},
+ "iotsecuredtunneling": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"iotthingsgraph": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@@ -1974,6 +3244,7 @@ var awsPartition = partition{
},
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
@@ -1983,24 +3254,30 @@ var awsPartition = partition{
"kafka": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"kinesis": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2010,30 +3287,59 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "kinesis-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "kinesis-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "kinesis-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "kinesis-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"kinesisanalytics": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -2042,17 +3348,27 @@ var awsPartition = partition{
"kinesisvideo": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"kms": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2062,6 +3378,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2077,15 +3394,27 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"lambda": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2095,20 +3424,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "lambda-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "lambda-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "lambda-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "lambda-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"license-manager": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2118,20 +3473,139 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "license-manager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "license-manager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "license-manager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "license-manager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
- "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
- "lightsail": service{
+ "logs": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "logs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "logs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "logs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "logs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "macie": service{
+
+ Endpoints: endpoints{
+ "fips-us-east-1": endpoint{
+ Hostname: "macie-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "macie-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "macie2": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2139,42 +3613,49 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "macie2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "macie2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
- "logs": service{
+ "managedblockchain": service{
Endpoints: endpoints{
- "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
- "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
- "ap-southeast-2": endpoint{},
- "ca-central-1": endpoint{},
- "eu-central-1": endpoint{},
- "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
- "eu-west-2": endpoint{},
- "eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
"us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
- },
- },
- "machinelearning": service{
-
- Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
},
},
"marketplacecommerceanalytics": service{
@@ -2186,6 +3667,7 @@ var awsPartition = partition{
"mediaconnect": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2213,14 +3695,45 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"medialive": service{
@@ -2234,8 +3747,11 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -2248,6 +3764,7 @@ var awsPartition = partition{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2266,6 +3783,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -2277,6 +3795,7 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2286,6 +3805,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2300,7 +3820,11 @@ var awsPartition = partition{
"mgh": service{
Endpoints: endpoints{
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"mobileanalytics": service{
@@ -2316,9 +3840,14 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"monitoring": service{
@@ -2326,6 +3855,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2335,20 +3865,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"mq": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2356,13 +3912,40 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "mq-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "mq-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "mq-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "mq-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"mturk-requester": service{
@@ -2378,74 +3961,175 @@ var awsPartition = partition{
"neptune": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "rds.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
"ap-northeast-1": endpoint{
Hostname: "rds.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-northeast-1",
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "rds.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "rds.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "rds.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "me-south-1": endpoint{
+ Hostname: "rds.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "rds.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
},
},
- "ap-northeast-2": endpoint{
- Hostname: "rds.ap-northeast-2.amazonaws.com",
+ "us-west-1": endpoint{
+ Hostname: "rds.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-northeast-2",
+ Region: "us-west-1",
},
},
- "ap-south-1": endpoint{
- Hostname: "rds.ap-south-1.amazonaws.com",
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-south-1",
+ Region: "us-west-2",
},
},
+ },
+ },
+ "oidc": service{
+
+ Endpoints: endpoints{
"ap-southeast-1": endpoint{
- Hostname: "rds.ap-southeast-1.amazonaws.com",
+ Hostname: "oidc.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-1",
},
},
"ap-southeast-2": endpoint{
- Hostname: "rds.ap-southeast-2.amazonaws.com",
+ Hostname: "oidc.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-2",
},
},
+ "ca-central-1": endpoint{
+ Hostname: "oidc.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
"eu-central-1": endpoint{
- Hostname: "rds.eu-central-1.amazonaws.com",
+ Hostname: "oidc.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-1",
},
},
"eu-north-1": endpoint{
- Hostname: "rds.eu-north-1.amazonaws.com",
+ Hostname: "oidc.eu-north-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-north-1",
},
},
"eu-west-1": endpoint{
- Hostname: "rds.eu-west-1.amazonaws.com",
+ Hostname: "oidc.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-1",
},
},
"eu-west-2": endpoint{
- Hostname: "rds.eu-west-2.amazonaws.com",
+ Hostname: "oidc.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-2",
},
},
"us-east-1": endpoint{
- Hostname: "rds.us-east-1.amazonaws.com",
+ Hostname: "oidc.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"us-east-2": endpoint{
- Hostname: "rds.us-east-2.amazonaws.com",
+ Hostname: "oidc.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
"us-west-2": endpoint{
- Hostname: "rds.us-west-2.amazonaws.com",
+ Hostname: "oidc.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -2497,6 +4181,67 @@ var awsPartition = partition{
Region: "us-east-1",
},
},
+ "fips-aws-global": endpoint{
+ Hostname: "organizations-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "outposts": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "outposts-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "outposts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "outposts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "outposts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "outposts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"pinpoint": service{
@@ -2506,17 +4251,41 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
+ "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "pinpoint.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "pinpoint.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"polly": service{
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2528,11 +4297,95 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "polly-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "polly-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "polly-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "polly-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "portal.sso": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{
+ Hostname: "portal.sso.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "portal.sso.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "portal.sso.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "portal.sso.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "portal.sso.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "portal.sso.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "portal.sso.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "portal.sso.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "portal.sso.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"projects.iot1click": service{
@@ -2551,6 +4404,10 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -2560,6 +4417,8 @@ var awsPartition = partition{
"ram": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2568,9 +4427,12 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -2580,6 +4442,7 @@ var awsPartition = partition{
"rds": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2589,11 +4452,42 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"me-south-1": endpoint{},
- "sa-east-1": endpoint{},
+ "rds-fips.ca-central-1": endpoint{
+ Hostname: "rds-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "rds-fips.us-east-1": endpoint{
+ Hostname: "rds-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "rds-fips.us-east-2": endpoint{
+ Hostname: "rds-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "rds-fips.us-west-1": endpoint{
+ Hostname: "rds-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "rds-fips.us-west-2": endpoint{
+ Hostname: "rds-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{
SSLCommonName: "{service}.{dnsSuffix}",
},
@@ -2605,6 +4499,7 @@ var awsPartition = partition{
"redshift": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2614,15 +4509,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ca-central-1": endpoint{
+ Hostname: "redshift-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "redshift-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "redshift-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "redshift-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "redshift-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"rekognition": service{
@@ -2636,15 +4562,40 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "rekognition-fips.us-east-1": endpoint{
+ Hostname: "rekognition-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "rekognition-fips.us-east-2": endpoint{
+ Hostname: "rekognition-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "rekognition-fips.us-west-1": endpoint{
+ Hostname: "rekognition-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "rekognition-fips.us-west-2": endpoint{
+ Hostname: "rekognition-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"resource-groups": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2654,6 +4605,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2725,6 +4677,8 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2732,9 +4686,13 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -2748,9 +4706,14 @@ var awsPartition = partition{
},
},
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"runtime.sagemaker": service{
@@ -2768,6 +4731,7 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-1-fips": endpoint{
@@ -2800,7 +4764,7 @@ var awsPartition = partition{
},
},
"s3": service{
- PartitionEndpoint: "us-east-1",
+ PartitionEndpoint: "aws-global",
IsRegionalized: boxedTrue,
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -2810,7 +4774,8 @@ var awsPartition = partition{
DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
- "ap-east-1": endpoint{},
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{
Hostname: "s3.ap-northeast-1.amazonaws.com",
SignatureVersions: []string{"s3", "s3v4"},
@@ -2825,9 +4790,17 @@ var awsPartition = partition{
Hostname: "s3.ap-southeast-2.amazonaws.com",
SignatureVersions: []string{"s3", "s3v4"},
},
+ "aws-global": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{
Hostname: "s3.eu-west-1.amazonaws.com",
SignatureVersions: []string{"s3", "s3v4"},
@@ -2847,7 +4820,7 @@ var awsPartition = partition{
SignatureVersions: []string{"s3", "s3v4"},
},
"us-east-1": endpoint{
- Hostname: "s3.amazonaws.com",
+ Hostname: "s3.us-east-1.amazonaws.com",
SignatureVersions: []string{"s3", "s3v4"},
},
"us-east-2": endpoint{},
@@ -3012,6 +4985,41 @@ var awsPartition = partition{
},
},
},
+ "savingsplans": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "savingsplans.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "schemas": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"sdb": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -3033,6 +5041,8 @@ var awsPartition = partition{
"secretsmanager": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -3041,9 +5051,11 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-1-fips": endpoint{
@@ -3078,6 +5090,7 @@ var awsPartition = partition{
"securityhub": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3087,14 +5100,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"serverlessrepo": service{
@@ -3102,6 +5141,9 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
"ap-northeast-1": endpoint{
Protocols: []string{"https"},
},
@@ -3135,6 +5177,9 @@ var awsPartition = partition{
"eu-west-3": endpoint{
Protocols: []string{"https"},
},
+ "me-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
"sa-east-1": endpoint{
Protocols: []string{"https"},
},
@@ -3155,6 +5200,8 @@ var awsPartition = partition{
"servicecatalog": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -3163,9 +5210,11 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-1-fips": endpoint{
@@ -3220,10 +5269,39 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "servicequotas": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"session.qldb": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -3231,18 +5309,31 @@ var awsPartition = partition{
},
},
"shield": service{
- IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
Defaults: endpoint{
SSLCommonName: "shield.us-east-1.amazonaws.com",
Protocols: []string{"https"},
},
Endpoints: endpoints{
- "us-east-1": endpoint{},
+ "aws-global": endpoint{
+ Hostname: "shield.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-aws-global": endpoint{
+ Hostname: "shield-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
},
},
"sms": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3252,15 +5343,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sms-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sms-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sms-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"snowball": service{
@@ -3276,11 +5392,101 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-ap-northeast-1": endpoint{
+ Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "fips-ap-northeast-2": endpoint{
+ Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "fips-ap-south-1": endpoint{
+ Hostname: "snowball-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "fips-ap-southeast-1": endpoint{
+ Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "fips-ap-southeast-2": endpoint{
+ Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "fips-ca-central-1": endpoint{
+ Hostname: "snowball-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-eu-central-1": endpoint{
+ Hostname: "snowball-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "fips-eu-west-1": endpoint{
+ Hostname: "snowball-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "fips-eu-west-2": endpoint{
+ Hostname: "snowball-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "fips-eu-west-3": endpoint{
+ Hostname: "snowball-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "fips-sa-east-1": endpoint{
+ Hostname: "snowball-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "snowball-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "snowball-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "snowball-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "snowball-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"sns": service{
@@ -3288,6 +5494,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3297,15 +5504,40 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sns-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sns-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sns-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sns-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"sqs": service{
@@ -3314,6 +5546,7 @@ var awsPartition = partition{
Protocols: []string{"http", "https"},
},
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3323,6 +5556,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3363,6 +5597,7 @@ var awsPartition = partition{
"ssm": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3372,20 +5607,70 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "ssm-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "ssm-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "ssm-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "ssm-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "ssm-facade-fips-us-east-1": endpoint{
+ Hostname: "ssm-facade-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "ssm-facade-fips-us-east-2": endpoint{
+ Hostname: "ssm-facade-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "ssm-facade-fips-us-west-1": endpoint{
+ Hostname: "ssm-facade-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "ssm-facade-fips-us-west-2": endpoint{
+ Hostname: "ssm-facade-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"states": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3395,20 +5680,46 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "states-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "states-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "states-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "states-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"storagegateway": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3418,15 +5729,22 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips": endpoint{
+ Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"streams.dynamodb": service{
@@ -3495,44 +5813,31 @@ var awsPartition = partition{
},
"sts": service{
PartitionEndpoint: "aws-global",
- Defaults: endpoint{
- Hostname: "sts.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
+
Endpoints: endpoints{
- "ap-east-1": endpoint{
- Hostname: "sts.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
- "ap-northeast-2": endpoint{
- Hostname: "sts.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
+ "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
- "aws-global": endpoint{},
- "ca-central-1": endpoint{},
- "eu-central-1": endpoint{},
- "eu-north-1": endpoint{},
- "eu-west-1": endpoint{},
- "eu-west-2": endpoint{},
- "eu-west-3": endpoint{},
- "me-south-1": endpoint{
- Hostname: "sts.me-south-1.amazonaws.com",
+ "aws-global": endpoint{
+ Hostname: "sts.amazonaws.com",
CredentialScope: credentialScope{
- Region: "me-south-1",
+ Region: "us-east-1",
},
},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
"us-east-1-fips": endpoint{
Hostname: "sts-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -3577,6 +5882,56 @@ var awsPartition = partition{
"swf": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "swf-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "swf-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "swf-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "swf-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3586,6 +5941,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3597,8 +5953,10 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
- "tagging": service{
-
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
Endpoints: endpoints{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
@@ -3607,16 +5965,50 @@ var awsPartition = partition{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
- "eu-central-1": endpoint{},
- "eu-north-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "fips.transcribe.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "fips.transcribe.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "fips.transcribe.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "fips.transcribe.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transcribestreaming": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-west-1": endpoint{},
- "eu-west-2": endpoint{},
- "eu-west-3": endpoint{},
- "me-south-1": endpoint{},
- "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
- "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -3646,13 +6038,18 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
"us-east-1": endpoint{},
"us-east-1-fips": endpoint{
Hostname: "translate-fips.us-east-1.amazonaws.com",
@@ -3667,6 +6064,7 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
"us-west-2-fips": endpoint{
Hostname: "translate-fips.us-west-2.amazonaws.com",
@@ -3681,6 +6079,12 @@ var awsPartition = partition{
IsRegionalized: boxedFalse,
Endpoints: endpoints{
+ "aws-fips": endpoint{
+ Hostname: "waf-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
"aws-global": endpoint{
Hostname: "waf.amazonaws.com",
CredentialScope: credentialScope{
@@ -3692,22 +6096,222 @@ var awsPartition = partition{
"waf-regional": service{
Endpoints: endpoints{
- "ap-northeast-1": endpoint{},
- "ap-northeast-2": endpoint{},
- "ap-south-1": endpoint{},
- "ap-southeast-1": endpoint{},
- "ap-southeast-2": endpoint{},
- "ca-central-1": endpoint{},
- "eu-central-1": endpoint{},
- "eu-north-1": endpoint{},
- "eu-west-1": endpoint{},
- "eu-west-2": endpoint{},
- "eu-west-3": endpoint{},
- "sa-east-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "ap-east-1": endpoint{
+ Hostname: "waf-regional.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "ap-northeast-1": endpoint{
+ Hostname: "waf-regional.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "waf-regional.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "waf-regional.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "waf-regional.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "waf-regional.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "waf-regional.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "waf-regional.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "waf-regional.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "waf-regional.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "waf-regional.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "waf-regional.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "fips-ap-east-1": endpoint{
+ Hostname: "waf-regional-fips.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "fips-ap-northeast-1": endpoint{
+ Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "fips-ap-northeast-2": endpoint{
+ Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "fips-ap-south-1": endpoint{
+ Hostname: "waf-regional-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "fips-ap-southeast-1": endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "fips-ap-southeast-2": endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "fips-ca-central-1": endpoint{
+ Hostname: "waf-regional-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "fips-eu-central-1": endpoint{
+ Hostname: "waf-regional-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "fips-eu-north-1": endpoint{
+ Hostname: "waf-regional-fips.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "fips-eu-west-1": endpoint{
+ Hostname: "waf-regional-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "fips-eu-west-2": endpoint{
+ Hostname: "waf-regional-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "fips-eu-west-3": endpoint{
+ Hostname: "waf-regional-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "fips-me-south-1": endpoint{
+ Hostname: "waf-regional-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ "fips-sa-east-1": endpoint{
+ Hostname: "waf-regional-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "fips-us-east-1": endpoint{
+ Hostname: "waf-regional-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "waf-regional-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "waf-regional-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "waf-regional-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{
+ Hostname: "waf-regional.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "waf-regional.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "waf-regional.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "waf-regional.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "waf-regional.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "waf-regional.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"workdocs": service{
@@ -3717,8 +6321,20 @@ var awsPartition = partition{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "workdocs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "workdocs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"workmail": service{
@@ -3750,6 +6366,7 @@ var awsPartition = partition{
"xray": service{
Endpoints: endpoints{
+ "af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -3759,6 +6376,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
+ "eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -3802,6 +6420,20 @@ var awscnPartition = partition{
},
},
Services: services{
+ "access-analyzer": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"api.ecr": service{
Endpoints: endpoints{
@@ -3819,27 +6451,52 @@ var awscnPartition = partition{
},
},
},
- "apigateway": service{
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "appsync": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "athena": service{
Endpoints: endpoints{
"cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
},
},
- "application-autoscaling": service{
+ "autoscaling": service{
Defaults: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com.cn",
Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "application-autoscaling",
- },
},
Endpoints: endpoints{
"cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
},
},
- "autoscaling": service{
+ "autoscaling-plans": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
},
@@ -3848,6 +6505,46 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "backup": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "budgets.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "ce.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"cloudformation": service{
Endpoints: endpoints{
@@ -3883,6 +6580,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"codedeploy": service{
Endpoints: endpoints{
@@ -3903,6 +6607,18 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "cur": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dax": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
"directconnect": service{
Endpoints: endpoints{
@@ -3960,6 +6676,15 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "eks": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"elasticache": service{
Endpoints: endpoints{
@@ -3974,6 +6699,25 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ "fips-cn-north-1": endpoint{
+ Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "fips-cn-northwest-1": endpoint{
+ Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"elasticloadbalancing": service{
Defaults: endpoint{
Protocols: []string{"https"},
@@ -4028,6 +6772,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "glue": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"greengrass": service{
IsRegionalized: boxedTrue,
Defaults: endpoint{
@@ -4037,6 +6788,13 @@ var awscnPartition = partition{
"cn-north-1": endpoint{},
},
},
+ "health": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"iam": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -4061,6 +6819,20 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "iotsecuredtunneling": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "kafka": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"kinesis": service{
Endpoints: endpoints{
@@ -4068,6 +6840,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"kms": service{
Endpoints: endpoints{
@@ -4116,6 +6895,36 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{
+ Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ "fips-aws-cn-global": endpoint{
+ Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"polly": service{
Endpoints: endpoints{
@@ -4136,10 +6945,33 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
+ "route53": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "route53.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
"s3": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
"cn-north-1": endpoint{},
@@ -4150,6 +6982,9 @@ var awscnPartition = partition{
Defaults: endpoint{
Protocols: []string{"https"},
SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
"cn-north-1": endpoint{
@@ -4168,6 +7003,26 @@ var awscnPartition = partition{
},
},
},
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "cn-northwest-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
"sms": service{
Endpoints: endpoints{
@@ -4178,7 +7033,20 @@ var awscnPartition = partition{
"snowball": service{
Endpoints: endpoints{
- "cn-north-1": endpoint{},
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ "fips-cn-north-1": endpoint{
+ Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "fips-cn-northwest-1": endpoint{
+ Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
},
},
"sns": service{
@@ -4217,7 +7085,8 @@ var awscnPartition = partition{
"storagegateway": service{
Endpoints: endpoints{
- "cn-north-1": endpoint{},
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
},
},
"streams.dynamodb": service{
@@ -4244,7 +7113,7 @@ var awscnPartition = partition{
Endpoints: endpoints{
"aws-cn-global": endpoint{
- Hostname: "support.cn-north-1.amazonaws.com",
+ Hostname: "support.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
@@ -4260,6 +7129,38 @@ var awscnPartition = partition{
},
"tagging": service{
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "xray": service{
+
Endpoints: endpoints{
"cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
@@ -4293,10 +7194,27 @@ var awsusgovPartition = partition{
Description: "AWS GovCloud (US-East)",
},
"us-gov-west-1": region{
- Description: "AWS GovCloud (US)",
+ Description: "AWS GovCloud (US-West)",
},
},
Services: services{
+ "access-analyzer": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"acm": service{
Endpoints: endpoints{
@@ -4309,6 +7227,18 @@ var awsusgovPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "acm-pca.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "acm-pca.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4316,6 +7246,18 @@ var awsusgovPartition = partition{
"api.ecr": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{
Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
@@ -4334,6 +7276,18 @@ var awsusgovPartition = partition{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1-fips-secondary": endpoint{
+ Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"apigateway": service{
@@ -4345,7 +7299,8 @@ var awsusgovPartition = partition{
},
"application-autoscaling": service{
Defaults: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com",
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
CredentialScope: credentialScope{
Service: "application-autoscaling",
},
@@ -4355,9 +7310,38 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
"athena": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4365,25 +7349,72 @@ var awsusgovPartition = partition{
"autoscaling": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
"us-gov-west-1": endpoint{
Protocols: []string{"http", "https"},
},
},
},
- "clouddirectory": service{
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "backup": service{
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
- "cloudformation": service{
+ "batch": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "batch.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "batch.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"cloudhsm": service{
Endpoints: endpoints{
@@ -4404,20 +7435,48 @@ var awsusgovPartition = partition{
"cloudtrail": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"codebuild": service{
Endpoints: endpoints{
"us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"codecommit": service{
Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4441,11 +7500,59 @@ var awsusgovPartition = partition{
},
},
},
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
"comprehend": service{
Defaults: endpoint{
Protocols: []string{"https"},
},
Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-west-1": endpoint{},
},
},
@@ -4459,32 +7566,78 @@ var awsusgovPartition = partition{
"datasync": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "datasync-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"fips-us-gov-west-1": endpoint{
Hostname: "datasync-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
},
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
"directconnect": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "directconnect.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "directconnect.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"dms": service{
Endpoints: endpoints{
+ "dms-fips": endpoint{
+ Hostname: "dms.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"ds": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "ds-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "ds-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4511,8 +7664,18 @@ var awsusgovPartition = partition{
"ec2": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "ec2.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "ec2.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"ec2metadata": service{
@@ -4528,6 +7691,27 @@ var awsusgovPartition = partition{
},
"ecs": service{
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "ecs-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "ecs-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "eks": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
Endpoints: endpoints{
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
@@ -4537,7 +7721,7 @@ var awsusgovPartition = partition{
Endpoints: endpoints{
"fips": endpoint{
- Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ Hostname: "elasticache.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -4549,19 +7733,54 @@ var awsusgovPartition = partition{
"elasticbeanstalk": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"elasticfilesystem": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
"elasticloadbalancing": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{
Protocols: []string{"http", "https"},
@@ -4571,12 +7790,36 @@ var awsusgovPartition = partition{
"elasticmapreduce": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{
Protocols: []string{"https"},
},
},
},
+ "email": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "email-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
"es": service{
Endpoints: endpoints{
@@ -4593,13 +7836,35 @@ var awsusgovPartition = partition{
"events": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "events.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "events.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"firehose": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "firehose-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "firehose-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4607,15 +7872,36 @@ var awsusgovPartition = partition{
"glacier": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "glacier.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{
+ Hostname: "glacier.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
},
},
},
"glue": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "glue-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "glue-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4626,7 +7912,12 @@ var awsusgovPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
- "us-gov-west-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Hostname: "greengrass.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"guardduty": service{
@@ -4636,6 +7927,12 @@ var awsusgovPartition = partition{
},
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "guardduty.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"health": service{
@@ -4655,11 +7952,29 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ "iam-govcloud-fips": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"inspector": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4674,8 +7989,40 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "iotsecuredtunneling": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kafka": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"kinesis": service{
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "kinesis-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "kinesis-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
Endpoints: endpoints{
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
@@ -4697,6 +8044,18 @@ var awsusgovPartition = partition{
"lambda": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "lambda-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "lambda-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4704,6 +8063,18 @@ var awsusgovPartition = partition{
"license-manager": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4711,14 +8082,29 @@ var awsusgovPartition = partition{
"logs": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "logs.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "logs.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"mediaconvert": service{
Endpoints: endpoints{
- "us-gov-west-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"metering.marketplace": service{
@@ -4735,6 +8121,18 @@ var awsusgovPartition = partition{
"monitoring": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "monitoring.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "monitoring.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4767,11 +8165,50 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ "fips-aws-us-gov-global": endpoint{
+ Hostname: "organizations.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "outposts": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "outposts.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "outposts.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
},
},
"polly": service{
Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "polly-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-west-1": endpoint{},
},
},
@@ -4785,6 +8222,18 @@ var awsusgovPartition = partition{
"rds": service{
Endpoints: endpoints{
+ "rds.us-gov-east-1": endpoint{
+ Hostname: "rds.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "rds.us-gov-west-1": endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4792,13 +8241,48 @@ var awsusgovPartition = partition{
"redshift": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "redshift.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "redshift.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"rekognition": service{
Endpoints: endpoints{
+ "rekognition-fips.us-gov-west-1": endpoint{
+ Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "resource-groups.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "resource-groups.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
@@ -4815,6 +8299,13 @@ var awsusgovPartition = partition{
},
},
},
+ "route53resolver": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"runtime.sagemaker": service{
Endpoints: endpoints{
@@ -4824,6 +8315,9 @@ var awsusgovPartition = partition{
"s3": service{
Defaults: endpoint{
SignatureVersions: []string{"s3", "s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
"fips-us-gov-west-1": endpoint{
@@ -4846,6 +8340,9 @@ var awsusgovPartition = partition{
Defaults: endpoint{
Protocols: []string{"https"},
SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
},
Endpoints: endpoints{
"us-gov-east-1": endpoint{
@@ -4881,6 +8378,13 @@ var awsusgovPartition = partition{
"secretsmanager": service{
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{},
"us-gov-west-1-fips": endpoint{
Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
@@ -4890,22 +8394,56 @@ var awsusgovPartition = partition{
},
},
},
+ "securityhub": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"serverlessrepo": service{
Defaults: endpoint{
Protocols: []string{"https"},
},
Endpoints: endpoints{
"us-gov-east-1": endpoint{
+ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
},
"us-gov-west-1": endpoint{
+ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
},
},
},
"servicecatalog": service{
Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{},
"us-gov-west-1-fips": endpoint{
Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com",
@@ -4918,6 +8456,18 @@ var awsusgovPartition = partition{
"sms": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "sms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4925,6 +8475,18 @@ var awsusgovPartition = partition{
"snowball": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "snowball-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "snowball-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4932,25 +8494,67 @@ var awsusgovPartition = partition{
"sns": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "sns.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{
+ Hostname: "sns.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
},
},
},
"sqs": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "sqs.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{
+ Hostname: "sqs.us-gov-west-1.amazonaws.com",
SSLCommonName: "{region}.queue.{dnsSuffix}",
Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
},
},
},
"ssm": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "ssm.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "ssm.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "ssm-facade-fips-us-gov-east-1": endpoint{
+ Hostname: "ssm-facade.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "ssm-facade-fips-us-gov-west-1": endpoint{
+ Hostname: "ssm-facade.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4958,6 +8562,18 @@ var awsusgovPartition = partition{
"states": service{
Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "states-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "states.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
@@ -4965,6 +8581,13 @@ var awsusgovPartition = partition{
"storagegateway": service{
Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
@@ -4995,14 +8618,54 @@ var awsusgovPartition = partition{
Endpoints: endpoints{
"us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "sts.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
"us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "sts.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-us-gov-global",
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "support.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "support.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"swf": service{
Endpoints: endpoints{
- "us-gov-east-1": endpoint{},
- "us-gov-west-1": endpoint{},
+ "us-gov-east-1": endpoint{
+ Hostname: "swf.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "swf.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"tagging": service{
@@ -5012,6 +8675,27 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
"translate": service{
Defaults: endpoint{
Protocols: []string{"https"},
@@ -5029,7 +8713,18 @@ var awsusgovPartition = partition{
"waf-regional": service{
Endpoints: endpoints{
- "us-gov-west-1": endpoint{},
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "waf-regional.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"workspaces": service{
@@ -5038,6 +8733,13 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "xray": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
},
}
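
A rough sketch of resolving one of the new GovCloud pseudo-regions through the existing endpoints API; the "monitoring"/"fips-us-gov-west-1" pair is taken from the hunk above, everything else is ordinary SDK usage:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// "fips-us-gov-west-1" is one of the new FIPS pseudo-regions added to the
	// aws-us-gov partition data above.
	re, err := endpoints.AwsUsGovPartition().EndpointFor(
		"monitoring", "fips-us-gov-west-1",
	)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	// The signing region comes from the endpoint's CredentialScope, us-gov-west-1.
	fmt.Println(re.URL, re.SigningRegion)
}
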
@@ -5078,13 +8780,21 @@ var awsisoPartition = partition{
},
},
},
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"application-autoscaling": service{
Defaults: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com",
Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "application-autoscaling",
- },
},
Endpoints: endpoints{
"us-iso-east-1": endpoint{},
@@ -5116,6 +8826,14 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"config": service{
Endpoints: endpoints{
@@ -5137,6 +8855,12 @@ var awsisoPartition = partition{
"dms": service{
Endpoints: endpoints{
+ "dms-fips": endpoint{
+ Hostname: "dms.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
"us-iso-east-1": endpoint{},
},
},
@@ -5199,6 +8923,12 @@ var awsisoPartition = partition{
},
},
},
+ "es": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"events": service{
Endpoints: endpoints{
@@ -5293,6 +9023,12 @@ var awsisoPartition = partition{
},
},
},
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"s3": service{
Defaults: endpoint{
SignatureVersions: []string{"s3v4"},
@@ -5369,6 +9105,20 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "transcribestreaming": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
"workspaces": service{
Endpoints: endpoints{
@@ -5406,11 +9156,7 @@ var awsisobPartition = partition{
Services: services{
"application-autoscaling": service{
Defaults: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com",
Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "application-autoscaling",
- },
},
Endpoints: endpoints{
"us-isob-east-1": endpoint{},
@@ -5451,6 +9197,12 @@ var awsisobPartition = partition{
"dms": service{
Endpoints: endpoints{
+ "dms-fips": endpoint{
+ Hostname: "dms.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
"us-isob-east-1": endpoint{},
},
},
@@ -5550,6 +9302,18 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"logs": service{
Endpoints: endpoints{
@@ -5606,6 +9370,12 @@ var awsisobPartition = partition{
"us-isob-east-1": endpoint{},
},
},
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
"states": service{
Endpoints: endpoints{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
index 9c936be6..ca956e5f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -3,6 +3,7 @@ package endpoints
import (
"fmt"
"regexp"
+ "strings"
"github.com/aws/aws-sdk-go/aws/awserr"
)
@@ -46,6 +47,108 @@ type Options struct {
//
// This option is ignored if StrictMatching is enabled.
ResolveUnknownService bool
+
+ // STS Regional Endpoint flag helps with resolving the STS endpoint
+ STSRegionalEndpoint STSRegionalEndpoint
+
+ // S3 Regional Endpoint flag helps with resolving the S3 endpoint
+ S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+}
+
+// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
+// options.
+type STSRegionalEndpoint int
+
+func (e STSRegionalEndpoint) String() string {
+ switch e {
+ case LegacySTSEndpoint:
+ return "legacy"
+ case RegionalSTSEndpoint:
+ return "regional"
+ case UnsetSTSEndpoint:
+ return ""
+ default:
+ return "unknown"
+ }
+}
+
+const (
+
+ // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
+ UnsetSTSEndpoint STSRegionalEndpoint = iota
+
+ // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
+ // to use legacy endpoints.
+ LegacySTSEndpoint
+
+ // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
+ // to use regional endpoints.
+ RegionalSTSEndpoint
+)
+
+// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy`, `regional` are the only case-insensitive valid strings for
+// resolving the STS regional Endpoint flag.
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
+ switch {
+ case strings.EqualFold(s, "legacy"):
+ return LegacySTSEndpoint, nil
+ case strings.EqualFold(s, "regional"):
+ return RegionalSTSEndpoint, nil
+ default:
+ return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
+ }
+}
+
+// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
+// Regional Endpoint options.
+type S3UsEast1RegionalEndpoint int
+
+func (e S3UsEast1RegionalEndpoint) String() string {
+ switch e {
+ case LegacyS3UsEast1Endpoint:
+ return "legacy"
+ case RegionalS3UsEast1Endpoint:
+ return "regional"
+ case UnsetS3UsEast1Endpoint:
+ return ""
+ default:
+ return "unknown"
+ }
+}
+
+const (
+
+ // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
+ // specified.
+ UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
+
+ // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+ // specified to use legacy endpoints.
+ LegacyS3UsEast1Endpoint
+
+ // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+ // specified to use regional endpoints.
+ RegionalS3UsEast1Endpoint
+)
+
+// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy`, `regional` are the only case-insensitive valid strings for
+// resolving the S3 regional Endpoint flag.
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
+ switch {
+ case strings.EqualFold(s, "legacy"):
+ return LegacyS3UsEast1Endpoint, nil
+ case strings.EqualFold(s, "regional"):
+ return RegionalS3UsEast1Endpoint, nil
+ default:
+ return UnsetS3UsEast1Endpoint,
+ fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
+ }
}
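
A minimal sketch of consuming the new flag types directly, using the exported names from this hunk plus the package's existing DefaultResolver; the region value is illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Parse the flag string the same way the env and shared config loaders do.
	sre, err := endpoints.GetSTSRegionalEndpoint("regional")
	if err != nil {
		fmt.Println(err)
		return
	}

	// With RegionalSTSEndpoint set, the legacy redirect to "aws-global"
	// (see legacyGlobalRegions further down) is skipped and a regional STS
	// URL is returned.
	re, err := endpoints.DefaultResolver().EndpointFor("sts", "eu-west-1",
		func(o *endpoints.Options) { o.STSRegionalEndpoint = sre },
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(re.URL)
}
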
// Set combines all of the option functions together.
@@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) {
o.ResolveUnknownService = true
}
+// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
+// STS endpoints to their regional endpoints instead of the global endpoint.
+func STSRegionalEndpointOption(o *Options) {
+ o.STSRegionalEndpoint = RegionalSTSEndpoint
+}
+
// A Resolver provides the interface for functionality to resolve endpoints.
// The built-in Partition and DefaultResolver return values satisfy this interface.
type Resolver interface {
@@ -194,7 +303,7 @@ func (p Partition) ID() string { return p.id }
// require the provided service and region to be known by the partition.
// If the endpoint cannot be strictly resolved an error will be returned. This
// mode is useful to ensure the endpoint resolved is valid. Without
-// StrictMatching enabled the endpoint returned my look valid but may not work.
+// StrictMatching enabled the endpoint returned may look valid but may not work.
// StrictMatching requires the SDK to be updated if you want to take advantage
// of new regions and services expansions.
//
@@ -208,7 +317,7 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (
// Regions returns a map of Regions indexed by their ID. This is useful for
// enumerating over the regions in a partition.
func (p Partition) Regions() map[string]Region {
- rs := map[string]Region{}
+ rs := make(map[string]Region, len(p.p.Regions))
for id, r := range p.p.Regions {
rs[id] = Region{
id: id,
@@ -223,7 +332,7 @@ func (p Partition) Regions() map[string]Region {
// Services returns a map of Service indexed by their ID. This is useful for
// enumerating over the services in a partition.
func (p Partition) Services() map[string]Service {
- ss := map[string]Service{}
+ ss := make(map[string]Service, len(p.p.Services))
for id := range p.p.Services {
ss[id] = Service{
id: id,
@@ -310,7 +419,7 @@ func (s Service) Regions() map[string]Region {
// A region is the AWS region the service exists in, whereas an Endpoint is
// a URL that can be resolved to an instance of a service.
func (s Service) Endpoints() map[string]Endpoint {
- es := map[string]Endpoint{}
+ es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
for id := range s.p.Services[s.id].Endpoints {
es[id] = Endpoint{
id: id,
@@ -350,6 +459,9 @@ type ResolvedEndpoint struct {
// The endpoint URL
URL string
+ // The endpoint partition
+ PartitionID string
+
// The region that should be used for signing requests.
SigningRegion string
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
new file mode 100644
index 00000000..df75e899
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
@@ -0,0 +1,24 @@
+package endpoints
+
+var legacyGlobalRegions = map[string]map[string]struct{}{
+ "sts": {
+ "ap-northeast-1": {},
+ "ap-south-1": {},
+ "ap-southeast-1": {},
+ "ap-southeast-2": {},
+ "ca-central-1": {},
+ "eu-central-1": {},
+ "eu-north-1": {},
+ "eu-west-1": {},
+ "eu-west-2": {},
+ "eu-west-3": {},
+ "sa-east-1": {},
+ "us-east-1": {},
+ "us-east-2": {},
+ "us-west-1": {},
+ "us-west-2": {},
+ },
+ "s3": {
+ "us-east-1": {},
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
index 523ad79a..77361372 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -7,6 +7,8 @@ import (
"strings"
)
+var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
+
type partitions []partition
func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
@@ -75,24 +77,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool)
return p.RegionRegex.MatchString(region)
}
+func allowLegacyEmptyRegion(service string) bool {
+ legacy := map[string]struct{}{
+ "budgets": {},
+ "ce": {},
+ "chime": {},
+ "cloudfront": {},
+ "ec2metadata": {},
+ "iam": {},
+ "importexport": {},
+ "organizations": {},
+ "route53": {},
+ "sts": {},
+ "support": {},
+ "waf": {},
+ }
+
+ _, allowed := legacy[service]
+ return allowed
+}
+
func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
var opt Options
opt.Set(opts...)
s, hasService := p.Services[service]
- if !(hasService || opt.ResolveUnknownService) {
+ if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
// Only return error if the resolver will not fallback to creating
// endpoint based on service endpoint ID passed in.
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
}
+ if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
+ region = s.PartitionEndpoint
+ }
+
+ if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) ||
+ (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) {
+ if _, ok := legacyGlobalRegions[service][region]; ok {
+ region = "aws-global"
+ }
+ }
+
e, hasEndpoint := s.endpointForRegion(region)
- if !hasEndpoint && opt.StrictMatching {
+ if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) {
return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
}
defs := []endpoint{p.Defaults, s.Defaults}
- return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+
+ return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt)
}
func serviceList(ss services) []string {
@@ -201,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string {
return s[0]
}
-func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) {
var merged endpoint
for _, def := range defs {
merged.mergeIn(def)
@@ -209,11 +243,27 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
merged.mergeIn(e)
e = merged
- hostname := e.Hostname
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ hostname := e.Hostname
// Offset the hostname for dualstack if enabled
if opts.UseDualStack && e.HasDualStack == boxedTrue {
hostname = e.DualStackHostname
+ region = signingRegion
+ }
+
+ if !validateInputRegion(region) {
+ return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided")
}
u := strings.Replace(hostname, "{service}", service, 1)
@@ -223,25 +273,14 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
u = fmt.Sprintf("%s://%s", scheme, u)
- signingRegion := e.CredentialScope.Region
- if len(signingRegion) == 0 {
- signingRegion = region
- }
-
- signingName := e.CredentialScope.Service
- var signingNameDerived bool
- if len(signingName) == 0 {
- signingName = service
- signingNameDerived = true
- }
-
return ResolvedEndpoint{
URL: u,
+ PartitionID: partitionID,
SigningRegion: signingRegion,
SigningName: signingName,
SigningNameDerived: signingNameDerived,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
- }
+ }, nil
}
func getEndpointScheme(protocols []string, disableSSL bool) string {
@@ -306,3 +345,7 @@ const (
boxedFalse
boxedTrue
)
+
+func validateInputRegion(region string) bool {
+ return regionValidationRegex.MatchString(region)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
index 185b0731..e819ab6c 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -10,6 +10,7 @@ import (
type Handlers struct {
Validate HandlerList
Build HandlerList
+ BuildStream HandlerList
Sign HandlerList
Send HandlerList
ValidateResponse HandlerList
@@ -28,6 +29,7 @@ func (h *Handlers) Copy() Handlers {
return Handlers{
Validate: h.Validate.copy(),
Build: h.Build.copy(),
+ BuildStream: h.BuildStream.copy(),
Sign: h.Sign.copy(),
Send: h.Send.copy(),
ValidateResponse: h.ValidateResponse.copy(),
@@ -46,6 +48,7 @@ func (h *Handlers) Copy() Handlers {
func (h *Handlers) Clear() {
h.Validate.Clear()
h.Build.Clear()
+ h.BuildStream.Clear()
h.Send.Clear()
h.Sign.Clear()
h.Unmarshal.Clear()
@@ -67,6 +70,9 @@ func (h *Handlers) IsEmpty() bool {
if h.Build.Len() != 0 {
return false
}
+ if h.BuildStream.Len() != 0 {
+ return false
+ }
if h.Send.Len() != 0 {
return false
}
@@ -320,3 +326,18 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
AddToUserAgent(r, s)
}
}
+
+// WithSetRequestHeaders updates the operation request's HTTP header to contain
+// the header key value pairs provided. If the header key already exists in the
+// request's HTTP header set, the existing value(s) will be replaced.
+func WithSetRequestHeaders(h map[string]string) Option {
+ return withRequestHeader(h).SetRequestHeaders
+}
+
+type withRequestHeader map[string]string
+
+func (h withRequestHeader) SetRequestHeaders(r *Request) {
+ for k, v := range h {
+ r.HTTPRequest.Header[k] = []string{v}
+ }
+}
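
A hedged sketch of applying the new option to a single request through Request.ApplyOptions; the S3 call, bucket, key, and header name are purely illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})

	// Set (or replace) headers on this one request before it is sent.
	req.ApplyOptions(request.WithSetRequestHeaders(map[string]string{
		"X-Example-Header": "value",
	}))

	_ = req.Send()
}
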
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index 8e332cce..d597c6ea 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -36,6 +36,10 @@ const (
// API request that was canceled. Requests given an aws.Context may
// return this error when canceled.
CanceledErrorCode = "RequestCanceled"
+
+ // ErrCodeRequestError is an error preventing the SDK from continuing to
+ // process the request.
+ ErrCodeRequestError = "RequestError"
)
// A Request is the service request to be made.
@@ -51,6 +55,7 @@ type Request struct {
HTTPRequest *http.Request
HTTPResponse *http.Response
Body io.ReadSeeker
+ streamingBody io.ReadCloser
BodyStart int64 // offset from beginning of Body that the request body starts
Params interface{}
Error error
@@ -99,8 +104,12 @@ type Operation struct {
BeforePresignFn func(r *Request) error
}
-// New returns a new Request pointer for the service API
-// operation and parameters.
+// New returns a new Request pointer for the service API operation and
+// parameters.
+//
+// A Retryer should be provided to direct how the request is retried. If
+// Retryer is nil, a retryer that performs no retries will be used. You can use
+// NoOpRetryer in the Client package to disable retry behavior directly.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
@@ -108,6 +117,10 @@ type Operation struct {
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+ if retryer == nil {
+ retryer = noOpRetryer{}
+ }
+
method := operation.HTTPMethod
if method == "" {
method = "POST"
@@ -122,8 +135,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
}
- SanitizeHostForHeader(httpReq)
-
r := &Request{
Config: cfg,
ClientInfo: clientInfo,
@@ -287,6 +298,13 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.ResetBody()
}
+// SetStreamingBody sets the reader to be used for the request that will stream
+// bytes to the server. Request's Body must not be set to any reader.
+func (r *Request) SetStreamingBody(reader io.ReadCloser) {
+ r.streamingBody = reader
+ r.SetReaderBody(aws.ReadSeekCloser(reader))
+}
+
// Presign returns the request's signed URL. Error will be returned
// if the signing fails. The expire parameter is only used for presigned Amazon
// S3 API requests. All other AWS services will use a fixed expiration
@@ -406,11 +424,17 @@ func (r *Request) Sign() error {
return r.Error
}
+ SanitizeHostForHeader(r.HTTPRequest)
+
r.Handlers.Sign.Run(r)
return r.Error
}
func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
+ if r.streamingBody != nil {
+ return r.streamingBody, nil
+ }
+
if r.safeBody != nil {
r.safeBody.Close()
}
@@ -615,6 +639,10 @@ func getHost(r *http.Request) string {
return r.Host
}
+ if r.URL == nil {
+ return ""
+ }
+
return r.URL.Host
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
index f093fc54..64784e16 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -17,11 +17,13 @@ import (
// does the pagination between API operations, and Paginator defines the
// configuration that will be used per page request.
//
-// cont := true
-// for p.Next() && cont {
+// for p.Next() {
// data := p.Page().(*s3.ListObjectsOutput)
// // process the page's data
+// // ...
+// // break out of loop to stop fetching additional pages
// }
+//
// return p.Err()
//
// See service client API operation Pages methods for examples how the SDK will
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
index e84084da..752ae47f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -35,16 +35,47 @@ type Retryer interface {
}
// WithRetryer sets a Retryer value to the given Config returning the Config
-// value for chaining.
+// value for chaining. The value must not be nil.
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ if retryer == nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
+ }
+ retryer = noOpRetryer{}
+ }
cfg.Retryer = retryer
return cfg
+
+}
+
+// noOpRetryer is an internal no-op retryer used when a request is created
+// without a retryer.
+//
+// Provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type noOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request; for noOpRetryer, MaxRetries is always zero.
+func (d noOpRetryer) MaxRetries() int {
+ return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d noOpRetryer) ShouldRetry(_ *Request) bool {
+ return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
+ return 0
}
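
Given the nil handling added above, a short sketch of the two call paths through WithRetryer; client.DefaultRetryer is the SDK's stock retryer and the retry count is illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// Explicit retryer: up to 3 retries with the SDK's default backoff.
	cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{NumMaxRetries: 3})

	// Passing nil no longer leaves cfg.Retryer nil: it is swapped for the
	// retry-disabled noOpRetryer, and an ERROR line is logged if a Logger is set.
	cfgNoRetry := request.WithRetryer(aws.NewConfig(), nil)

	_, _ = cfg, cfgNoRetry
}
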
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
- "RequestError": {},
+ ErrCodeRequestError: {},
"RequestTimeout": {},
ErrCodeResponseTimeout: {},
"RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
@@ -52,6 +83,7 @@ var retryableCodes = map[string]struct{}{
var throttleCodes = map[string]struct{}{
"ProvisionedThroughputExceededException": {},
+ "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API
"Throttling": {},
"ThrottlingException": {},
"RequestLimitExceeded": {},
@@ -60,6 +92,7 @@ var throttleCodes = map[string]struct{}{
"TooManyRequestsException": {}, // Lambda functions
"PriorRequestNotComplete": {}, // Route53
"TransactionInProgressException": {},
+ "EC2ThrottledException": {}, // EC2
}
// credsExpiredCodes is a collection of error codes which signify the credentials
@@ -145,8 +178,8 @@ func shouldRetryError(origErr error) bool {
origErr := err.OrigErr()
var shouldRetry bool
if origErr != nil {
- shouldRetry := shouldRetryError(origErr)
- if err.Code() == "RequestError" && !shouldRetry {
+ shouldRetry = shouldRetryError(origErr)
+ if err.Code() == ErrCodeRequestError && !shouldRetry {
return false
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
index 7713ccfc..fe6dac1f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -3,6 +3,7 @@ package session
import (
"fmt"
"os"
+ "time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -47,10 +48,10 @@ func resolveCredentials(cfg *aws.Config,
}
// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
-// 'AWS_IAM_ROLE_ARN' was not set.
+// 'AWS_ROLE_ARN' was not set.
var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
-// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_IAM_ROLE_ARN' was set but
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
@@ -206,7 +207,14 @@ func credsFromAssumeRole(cfg aws.Config,
sharedCfg.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.RoleSessionName
- opt.Duration = sessOpts.AssumeRoleDuration
+
+ if sessOpts.AssumeRoleDuration == 0 &&
+ sharedCfg.AssumeRoleDuration != nil &&
+ *sharedCfg.AssumeRoleDuration/time.Minute > 15 {
+ opt.Duration = *sharedCfg.AssumeRoleDuration
+ } else if sessOpts.AssumeRoleDuration != 0 {
+ opt.Duration = sessOpts.AssumeRoleDuration
+ }
// Assume role with external ID
if len(sharedCfg.ExternalID) > 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index 60a6f9ce..c1e0e9c9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -1,12 +1,15 @@
package session
import (
+ "fmt"
"os"
"strconv"
+ "strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
)
// EnvProviderName provides a name of the provider when config is loaded from environment.
@@ -125,6 +128,26 @@ type envConfig struct {
//
// AWS_ROLE_SESSION_NAME=session_name
RoleSessionName string
+
+ // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint
+ // for a service.
+ //
+ // AWS_STS_REGIONAL_ENDPOINTS=regional
+ // Valid values are `regional` or `legacy`.
+ STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+ // Specifies the S3 Regional Endpoint flag for the SDK to resolve the
+ // endpoint for a service.
+ //
+ // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
+ // Valid values are `regional` or `legacy`.
+ S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // AWS_S3_USE_ARN_REGION=true
+ S3UseARNRegion bool
}
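
A sketch of exercising the three new environment switches from Go before building a session; with this change an invalid value now surfaces as an error from NewSessionWithOptions instead of being silently dropped:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "regional")
	os.Setenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", "regional")
	os.Setenv("AWS_S3_USE_ARN_REGION", "true")

	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		fmt.Println("session error:", err) // e.g. an unparsable flag value
		return
	}
	_ = sess
}
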
var (
@@ -179,6 +202,15 @@ var (
roleSessionNameEnvKey = []string{
"AWS_ROLE_SESSION_NAME",
}
+ stsRegionalEndpointKey = []string{
+ "AWS_STS_REGIONAL_ENDPOINTS",
+ }
+ s3UsEast1RegionalEndpoint = []string{
+ "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
+ }
+ s3UseARNRegionEnvKey = []string{
+ "AWS_S3_USE_ARN_REGION",
+ }
)
// loadEnvConfig retrieves the SDK's environment configuration.
@@ -187,7 +219,7 @@ var (
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
// the shared SDK config will be loaded in addition to the SDK's specific
// configuration values.
-func loadEnvConfig() envConfig {
+func loadEnvConfig() (envConfig, error) {
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
return envConfigLoad(enableSharedConfig)
}
@@ -198,11 +230,11 @@ func loadEnvConfig() envConfig {
// Loads the shared configuration in addition to the SDK's specific configuration.
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
// environment variable is set.
-func loadSharedEnvConfig() envConfig {
+func loadSharedEnvConfig() (envConfig, error) {
return envConfigLoad(true)
}
-func envConfigLoad(enableSharedConfig bool) envConfig {
+func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
cfg := envConfig{}
cfg.EnableSharedConfig = enableSharedConfig
@@ -264,12 +296,48 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
- return cfg
+ var err error
+ // STS Regional Endpoint variable
+ for _, k := range stsRegionalEndpointKey {
+ if v := os.Getenv(k); len(v) != 0 {
+ cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v)
+ if err != nil {
+ return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
+ }
+ }
+ }
+
+ // S3 Regional Endpoint variable
+ for _, k := range s3UsEast1RegionalEndpoint {
+ if v := os.Getenv(k); len(v) != 0 {
+ cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v)
+ if err != nil {
+ return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
+ }
+ }
+ }
+
+ var s3UseARNRegion string
+ setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey)
+ if len(s3UseARNRegion) != 0 {
+ switch {
+ case strings.EqualFold(s3UseARNRegion, "false"):
+ cfg.S3UseARNRegion = false
+ case strings.EqualFold(s3UseARNRegion, "true"):
+ cfg.S3UseARNRegion = true
+ default:
+ return envConfig{}, fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true or false",
+ s3UseARNRegionEnvKey[0], s3UseARNRegion)
+ }
+ }
+
+ return cfg, nil
}
func setFromEnvVal(dst *string, keys []string) {
for _, k := range keys {
- if v := os.Getenv(k); len(v) > 0 {
+ if v := os.Getenv(k); len(v) != 0 {
*dst = v
break
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 7b0a942e..0ff49960 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -73,7 +73,7 @@ type Session struct {
// func is called instead of waiting to receive an error until a request is made.
func New(cfgs ...*aws.Config) *Session {
// load initial config from environment
- envCfg := loadEnvConfig()
+ envCfg, envErr := loadEnvConfig()
if envCfg.EnableSharedConfig {
var cfg aws.Config
@@ -93,17 +93,17 @@ func New(cfgs ...*aws.Config) *Session {
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s = &Session{Config: defaults.Config()}
- s.Config.MergeIn(cfgs...)
- s.Config.Logger.Log("ERROR:", msg, "Error:", err)
- s.Handlers.Validate.PushBack(func(r *request.Request) {
- r.Error = err
- })
+ s.logDeprecatedNewSessionError(msg, err, cfgs)
}
return s
}
s := deprecatedNewSession(cfgs...)
+ if envErr != nil {
+ msg := "failed to load env config"
+ s.logDeprecatedNewSessionError(msg, envErr, cfgs)
+ }
if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
if l := s.Config.Logger; l != nil {
@@ -112,11 +112,8 @@ func New(cfgs ...*aws.Config) *Session {
} else if csmCfg.Enabled {
err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
if err != nil {
- err = fmt.Errorf("failed to enable CSM, %v", err)
- s.Config.Logger.Log("ERROR:", err.Error())
- s.Handlers.Validate.PushBack(func(r *request.Request) {
- r.Error = err
- })
+ msg := "failed to enable CSM"
+ s.logDeprecatedNewSessionError(msg, err, cfgs)
}
}
@@ -279,10 +276,17 @@ type Options struct {
// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
+ var err error
if opts.SharedConfigState == SharedConfigEnable {
- envCfg = loadSharedEnvConfig()
+ envCfg, err = loadSharedEnvConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to load shared config, %v", err)
+ }
} else {
- envCfg = loadEnvConfig()
+ envCfg, err = loadEnvConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to load environment config, %v", err)
+ }
}
if len(opts.Profile) != 0 {
@@ -550,6 +554,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
}
}
+ // Regional Endpoint flag for STS endpoint resolving
+ mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
+ userCfg.STSRegionalEndpoint,
+ envCfg.STSRegionalEndpoint,
+ sharedCfg.STSRegionalEndpoint,
+ endpoints.LegacySTSEndpoint,
+ })
+
+ // Regional Endpoint flag for S3 endpoint resolving
+ mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{
+ userCfg.S3UsEast1RegionalEndpoint,
+ envCfg.S3UsEast1RegionalEndpoint,
+ sharedCfg.S3UsEast1RegionalEndpoint,
+ endpoints.LegacyS3UsEast1Endpoint,
+ })
+
// Configure credentials if not already set by the user when creating the
// Session.
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
@@ -560,9 +580,35 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
cfg.Credentials = creds
}
+ cfg.S3UseARNRegion = userCfg.S3UseARNRegion
+ if cfg.S3UseARNRegion == nil {
+ cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
+ }
+ if cfg.S3UseARNRegion == nil {
+ cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion
+ }
+
return nil
}
+func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) {
+ for _, v := range values {
+ if v != endpoints.UnsetSTSEndpoint {
+ cfg.STSRegionalEndpoint = v
+ break
+ }
+ }
+}
+
+func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) {
+ for _, v := range values {
+ if v != endpoints.UnsetS3UsEast1Endpoint {
+ cfg.S3UsEast1RegionalEndpoint = v
+ break
+ }
+ }
+}
+
func initHandlers(s *Session) {
// Add the Validate parameter handler if it is not disabled.
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
@@ -591,47 +637,67 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
-func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
- // Backwards compatibility, the error will be eaten if user calls ClientConfig
- // directly. All SDK services will use ClientconfigWithError.
- cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
-
- return cfg
-}
-
-func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
- var resolved endpoints.ResolvedEndpoint
- var err error
-
region := aws.StringValue(s.Config.Region)
-
- if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
- resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
- resolved.SigningRegion = region
- } else {
- resolved, err = s.Config.EndpointResolver.EndpointFor(
- serviceName, region,
- func(opt *endpoints.Options) {
- opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
- opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
-
- // Support the condition where the service is modeled but its
- // endpoint metadata is not available.
- opt.ResolveUnknownService = true
- },
- )
+ resolved, err := s.resolveEndpoint(service, region, s.Config)
+ if err != nil {
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) != 0 {
+ // Error occurred while resolving endpoint, but the request
+ // being invoked has had an endpoint specified after the client
+ // was created.
+ return
+ }
+ r.Error = err
+ })
}
return client.Config{
Config: s.Config,
Handlers: s.Handlers,
+ PartitionID: resolved.PartitionID,
Endpoint: resolved.URL,
SigningRegion: resolved.SigningRegion,
SigningNameDerived: resolved.SigningNameDerived,
SigningName: resolved.SigningName,
- }, err
+ }
+}
+
+func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
+
+ if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
+ return endpoints.ResolvedEndpoint{
+ URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
+ SigningRegion: region,
+ }, nil
+ }
+
+ resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
+ // Support for STSRegionalEndpoint where the STSRegionalEndpoint is
+ // provided in envConfig or sharedConfig with envConfig getting
+ // precedence.
+ opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint
+
+ // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is
+ // provided in envConfig or sharedConfig with envConfig getting
+ // precedence.
+ opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+ },
+ )
+ if err != nil {
+ return endpoints.ResolvedEndpoint{}, err
+ }
+
+ return resolved, nil
}
// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
@@ -641,12 +707,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
-
- region := aws.StringValue(s.Config.Region)
-
if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
- resolved.SigningRegion = region
+ resolved.SigningRegion = aws.StringValue(s.Config.Region)
}
return client.Config{
@@ -658,3 +721,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
SigningName: resolved.SigningName,
}
}
+
+// logDeprecatedNewSessionError logs the session creation error and fails any
+// requests made with the session.
+func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index d91ac93a..680805a3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -2,9 +2,11 @@ package session
import (
"fmt"
+ "time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/internal/ini"
)
@@ -15,12 +17,13 @@ const (
sessionTokenKey = `aws_session_token` // optional
// Assume Role Credentials group
- roleArnKey = `role_arn` // group required
- sourceProfileKey = `source_profile` // group required (or credential_source)
- credentialSourceKey = `credential_source` // group required (or source_profile)
- externalIDKey = `external_id` // optional
- mfaSerialKey = `mfa_serial` // optional
- roleSessionNameKey = `role_session_name` // optional
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required (or credential_source)
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+ roleDurationSecondsKey = "duration_seconds" // optional
// CSM options
csmEnabledKey = `csm_enabled`
@@ -40,10 +43,19 @@ const (
// Web Identity Token File
webIdentityTokenFileKey = `web_identity_token_file` // optional
+ // Additional config fields for regional or legacy endpoints
+ stsRegionalEndpointSharedKey = `sts_regional_endpoints`
+
+ // Additional config fields for regional or legacy endpoints
+ s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint`
+
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
// is not provided.
DefaultSharedConfigProfile = `default`
+
+ // S3 ARN Region Usage
+ s3UseARNRegionKey = "s3_use_arn_region"
)
// sharedConfig represents the configuration fields of the SDK config files.
@@ -63,10 +75,11 @@ type sharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string
- RoleARN string
- RoleSessionName string
- ExternalID string
- MFASerial string
+ RoleARN string
+ RoleSessionName string
+ ExternalID string
+ MFASerial string
+ AssumeRoleDuration *time.Duration
SourceProfileName string
SourceProfile *sharedConfig
@@ -88,6 +101,24 @@ type sharedConfig struct {
CSMHost string
CSMPort string
CSMClientID string
+
+ // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+ //
+ // sts_regional_endpoints = regional
+ // This can take the value `LegacySTSEndpoint` or `RegionalSTSEndpoint`.
+ STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+ // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+ //
+ // s3_us_east_1_regional_endpoint = regional
+ // This can take the value `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint`.
+ S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // s3_use_arn_region=true
+ S3UseARNRegion bool
}
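
For reference, a hedged sketch of a shared config profile exercising the new keys; the profile name, role ARN, and values are illustrative:

[profile example]
region                         = us-east-1
role_arn                       = arn:aws:iam::123456789012:role/example
source_profile                 = default
duration_seconds               = 3600
sts_regional_endpoints         = regional
s3_us_east_1_regional_endpoint = regional
s3_use_arn_region              = true
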
type sharedConfigFile struct {
@@ -244,8 +275,30 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
updateString(&cfg.CredentialSource, section, credentialSourceKey)
-
updateString(&cfg.Region, section, regionKey)
+
+ if section.Has(roleDurationSecondsKey) {
+ d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+ cfg.AssumeRoleDuration = &d
+ }
+
+ if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
+ sre, err := endpoints.GetSTSRegionalEndpoint(v)
+ if err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %s, %v",
+ stsRegionalEndpointSharedKey, file.Filename, err)
+ }
+ cfg.STSRegionalEndpoint = sre
+ }
+
+ if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
+ sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
+ if err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %s, %v",
+ s3UsEast1RegionalSharedKey, file.Filename, err)
+ }
+ cfg.S3UsEast1RegionalEndpoint = sre
+ }
}
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
@@ -271,6 +324,8 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.CSMPort, section, csmPortKey)
updateString(&cfg.CSMClientID, section, csmClientIDKey)
+ updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey)
+
return nil
}
@@ -363,6 +418,15 @@ func updateString(dst *string, section ini.Section, key string) {
*dst = section.String(key)
}
+// updateBool will only update the dst with the value in the section key, if the
+// key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.Bool(key)
+}
+
// updateBoolPtr will only update the dst with the value in the section key,
// key is present in the section.
func updateBoolPtr(dst **bool, section ini.Section, key string) {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
index 244c86da..07ea799f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -1,8 +1,7 @@
package v4
import (
- "net/http"
- "strings"
+ "github.com/aws/aws-sdk-go/internal/strings"
)
// validator houses a set of rules needed for validation of a
@@ -61,7 +60,7 @@ type patterns []string
// been found
func (p patterns) IsValid(value string) bool {
for _, pattern := range p {
- if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ if strings.HasPrefixFold(value, pattern) {
return true
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
new file mode 100644
index 00000000..f35fc860
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
@@ -0,0 +1,13 @@
+// +build !go1.7
+
+package v4
+
+import (
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+func requestContext(r *http.Request) aws.Context {
+ return aws.BackgroundContext()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
new file mode 100644
index 00000000..fed5c859
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
@@ -0,0 +1,13 @@
+// +build go1.7
+
+package v4
+
+import (
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+func requestContext(r *http.Request) aws.Context {
+ return r.Context()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
new file mode 100644
index 00000000..02cbd97e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
@@ -0,0 +1,63 @@
+package v4
+
+import (
+ "encoding/hex"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+type credentialValueProvider interface {
+ Get() (credentials.Value, error)
+}
+
+// StreamSigner implements signing of event stream encoded payloads
+type StreamSigner struct {
+ region string
+ service string
+
+ credentials credentialValueProvider
+
+ prevSig []byte
+}
+
+// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages
+func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
+ return &StreamSigner{
+ region: region,
+ service: service,
+ credentials: credentials,
+ prevSig: seedSignature,
+ }
+}
+
+// GetSignature takes event stream encoded headers and payload and returns a signature
+func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
+ credValue, err := s.credentials.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
+
+ keyPath := buildSigningScope(s.region, s.service, date)
+
+ stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
+
+ signature := hmacSHA256(sigKey, []byte(stringToSign))
+ s.prevSig = signature
+
+ return signature, nil
+}
+
+func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
+ return strings.Join([]string{
+ "AWS4-HMAC-SHA256-PAYLOAD",
+ formatTime(date),
+ scope,
+ hex.EncodeToString(prevSig),
+ hex.EncodeToString(hashSHA256(headers)),
+ hex.EncodeToString(hashSHA256(payload)),
+ }, "\n")
+}
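
A minimal sketch of driving the new StreamSigner, assuming the caller already holds the seed signature from the initial signed request and the encoded event-stream header/payload bytes; region, service, and credentials are placeholders:

package eventsign

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

// signFrames signs each encoded (headers, payload) pair in order; every
// signature chains off the previous one via prevSig.
func signFrames(seedSig []byte, frames [][2][]byte) ([][]byte, error) {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "") // placeholder

	signer := v4.NewStreamSigner("us-west-2", "transcribe", seedSig, creds)

	sigs := make([][]byte, 0, len(frames))
	for _, f := range frames {
		sig, err := signer.GetSignature(f[0], f[1], time.Now().UTC())
		if err != nil {
			return nil, err
		}
		sigs = append(sigs, sig)
	}
	return sigs, nil
}
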
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 8104793a..d71f7b3f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -76,9 +76,14 @@ import (
)
const (
+ authorizationHeader = "Authorization"
+ authHeaderSignatureElem = "Signature="
+ signatureQueryKey = "X-Amz-Signature"
+
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
+ awsV4Request = "aws4_request"
// emptyStringSHA256 is a SHA256 of an empty string
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
@@ -87,9 +92,9 @@ const (
var ignoredHeaders = rules{
blacklist{
mapRule{
- "Authorization": struct{}{},
- "User-Agent": struct{}{},
- "X-Amzn-Trace-Id": struct{}{},
+ authorizationHeader: struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
},
},
}
@@ -229,11 +234,9 @@ type signingCtx struct {
DisableURIPathEscaping bool
- credValues credentials.Value
- isPresign bool
- formattedTime string
- formattedShortTime string
- unsignedPayload bool
+ credValues credentials.Value
+ isPresign bool
+ unsignedPayload bool
bodyDigest string
signedHeaders string
@@ -337,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
}
var err error
- ctx.credValues, err = v4.Credentials.Get()
+ ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
if err != nil {
return http.Header{}, err
}
@@ -532,39 +535,56 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
ctx.buildSignature() // depends on string to sign
if ctx.isPresign {
- ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
"SignedHeaders=" + ctx.signedHeaders,
- "Signature=" + ctx.signature,
+ authHeaderSignatureElem + ctx.signature,
}
- ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
}
return nil
}
-func (ctx *signingCtx) buildTime() {
- ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
- ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+// GetSignedRequestSignature attempts to extract the signature of the request.
+// Returning an error if the request is unsigned, or unable to extract the
+// signature.
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
+
+ if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
+ ps := strings.Split(auth, ", ")
+ for _, p := range ps {
+ if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
+ sig := p[len(authHeaderSignatureElem):]
+ if len(sig) == 0 {
+ return nil, fmt.Errorf("invalid request signature authorization header")
+ }
+ return hex.DecodeString(sig)
+ }
+ }
+ }
+
+ if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
+ return hex.DecodeString(sig)
+ }
+
+ return nil, fmt.Errorf("request not signed")
+}
+func (ctx *signingCtx) buildTime() {
if ctx.isPresign {
duration := int64(ctx.ExpireTime / time.Second)
- ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
- ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
}
}
func (ctx *signingCtx) buildCredentialString() {
- ctx.credentialString = strings.Join([]string{
- ctx.formattedShortTime,
- ctx.Region,
- ctx.ServiceName,
- "aws4_request",
- }, "/")
+ ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)
if ctx.isPresign {
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
@@ -588,8 +608,7 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
var headers []string
headers = append(headers, "host")
for k, v := range header {
- canonicalKey := http.CanonicalHeaderKey(k)
- if !r.IsValid(canonicalKey) {
+ if !r.IsValid(k) {
continue // ignored header
}
if ctx.SignedHeaderVals == nil {
@@ -653,19 +672,15 @@ func (ctx *signingCtx) buildCanonicalString() {
func (ctx *signingCtx) buildStringToSign() {
ctx.stringToSign = strings.Join([]string{
authHeaderPrefix,
- ctx.formattedTime,
+ formatTime(ctx.Time),
ctx.credentialString,
- hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+ hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
}, "\n")
}
func (ctx *signingCtx) buildSignature() {
- secret := ctx.credValues.SecretAccessKey
- date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
- region := makeHmac(date, []byte(ctx.Region))
- service := makeHmac(region, []byte(ctx.ServiceName))
- credentials := makeHmac(service, []byte("aws4_request"))
- signature := makeHmac(credentials, []byte(ctx.stringToSign))
+ creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
+ signature := hmacSHA256(creds, []byte(ctx.stringToSign))
ctx.signature = hex.EncodeToString(signature)
}
@@ -726,13 +741,13 @@ func (ctx *signingCtx) removePresign() {
ctx.Query.Del("X-Amz-SignedHeaders")
}
-func makeHmac(key []byte, data []byte) []byte {
+func hmacSHA256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
-func makeSha256(data []byte) []byte {
+func hashSHA256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
@@ -804,3 +819,28 @@ func stripExcessSpaces(vals []string) {
vals[i] = string(buf[:m])
}
}
+
+func buildSigningScope(region, service string, dt time.Time) string {
+ return strings.Join([]string{
+ formatShortTime(dt),
+ region,
+ service,
+ awsV4Request,
+ }, "/")
+}
+
+func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
+ kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
+ kRegion := hmacSHA256(kDate, []byte(region))
+ kService := hmacSHA256(kRegion, []byte(service))
+ signingKey := hmacSHA256(kService, []byte(awsV4Request))
+ return signingKey
+}
+
+func formatShortTime(dt time.Time) string {
+ return dt.UTC().Format(shortTimeFormat)
+}
+
+func formatTime(dt time.Time) string {
+ return dt.UTC().Format(timeFormat)
+}
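
Besides the refactor into the buildSigningScope, deriveSigningKey, and formatTime helpers shared with stream.go above, the v4 package gains the exported GetSignedRequestSignature, which recovers the hex-decoded signature from either the Authorization header or the X-Amz-Signature query value. A rough sketch of the round trip, using placeholder credentials and an example endpoint rather than a real request:

package main

import (
    "log"
    "net/http"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials"
    v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
    // Hypothetical request and static credentials, for illustration only.
    req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/", nil)

    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
    if _, err := signer.Sign(req, nil, "s3", "us-east-1", time.Now()); err != nil {
        log.Fatal(err)
    }

    // New in this version: recover the raw signature bytes from a signed request.
    sig, err := v4.GetSignedRequestSignature(req)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("signature: %x", sig)
}
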
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
index 45509154..98751ee8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -2,6 +2,7 @@ package aws
import (
"io"
+ "strings"
"sync"
"github.com/aws/aws-sdk-go/internal/sdkio"
@@ -205,3 +206,59 @@ func (b *WriteAtBuffer) Bytes() []byte {
defer b.m.Unlock()
return b.buf
}
+
+// MultiCloser is a utility to close multiple io.Closers within a single
+// statement.
+type MultiCloser []io.Closer
+
+// Close closes all of the io.Closers making up the MultiClosers. Any
+// errors that occur while closing will be returned in the order they
+// occur.
+func (m MultiCloser) Close() error {
+ var errs errors
+ for _, c := range m {
+ err := c.Close()
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) != 0 {
+ return errs
+ }
+
+ return nil
+}
+
+type errors []error
+
+func (es errors) Error() string {
+ var parts []string
+ for _, e := range es {
+ parts = append(parts, e.Error())
+ }
+
+ return strings.Join(parts, "\n")
+}
+
+// CopySeekableBody copies the seekable body to an io.Writer
+func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+ curPos, err := src.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ // copy errors may be assumed to be from the body.
+ n, err := io.Copy(dst, src)
+ if err != nil {
+ return n, err
+ }
+
+ // seek back to the first position after reading to reset
+ // the body for transmission.
+ _, err = src.Seek(curPos, sdkio.SeekStart)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
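
CopySeekableBody copies a seekable body and then seeks back to the original offset so the body can still be transmitted, and MultiCloser collects Close errors from several closers into one error. A small self-contained illustration using in-memory readers and temp files rather than real HTTP bodies:

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // CopySeekableBody leaves the reader positioned where it started,
    // so the same body can still be sent after hashing or copying it.
    body := strings.NewReader("hello world")
    var buf bytes.Buffer
    n, err := aws.CopySeekableBody(&buf, body)
    fmt.Println(n, err, buf.String())

    rest, _ := ioutil.ReadAll(body) // still reads from offset 0
    fmt.Println(string(rest))

    // MultiCloser closes several closers in one statement, collecting errors.
    f1, _ := ioutil.TempFile("", "a")
    f2, _ := ioutil.TempFile("", "b")
    defer os.Remove(f1.Name())
    defer os.Remove(f2.Name())
    if err := (aws.MultiCloser{f1, f2}).Close(); err != nil {
        fmt.Println("close errors:", err)
    }
}
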
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index d1548ebd..08127d60 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.25.3"
+const SDKVersion = "1.34.0"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
new file mode 100644
index 00000000..876dcb3f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
@@ -0,0 +1,40 @@
+// +build !go1.7
+
+package context
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case BackgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+// BackgroundCtx is the common base context.
+var BackgroundCtx = new(emptyCtx)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
index e56dcee2..cf9fad81 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -162,7 +162,7 @@ loop:
if len(tokens) == 0 {
break loop
}
-
+ // If shouldSkip is true, skip tokens until shouldSkip is set back to false.
step = SkipTokenState
}
@@ -218,7 +218,7 @@ loop:
// S -> equal_expr' expr_stmt'
switch k.Kind {
case ASTKindEqualExpr:
- // assiging a value to some key
+ // assigning a value to some key
k.AppendChild(newExpression(tok))
stack.Push(newExprStatement(k))
case ASTKindExpr:
@@ -250,6 +250,13 @@ loop:
if !runeCompare(tok.Raw(), openBrace) {
return nil, NewParseError("expected '['")
}
+ // If OpenScopeState is not at the start, we must mark the previous AST as complete.
+ //
+ // For example: if the previous AST was a skip statement, it must be marked
+ // complete before we create a new statement.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
stmt := newStatement()
stack.Push(stmt)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
index 6bb69644..da7a4049 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
@@ -22,24 +22,24 @@ func newSkipper() skipper {
}
func (s *skipper) ShouldSkip(tok Token) bool {
+ // The skip state is cleared only if the previous token was a newline (NL)
+ // and the current token is not whitespace (WS).
if s.shouldSkip &&
s.prevTok.Type() == TokenNL &&
tok.Type() != TokenWS {
-
s.Continue()
return false
}
s.prevTok = tok
-
return s.shouldSkip
}
func (s *skipper) Skip() {
s.shouldSkip = true
- s.prevTok = emptyToken
}
func (s *skipper) Continue() {
s.shouldSkip = false
+ // The empty token is assigned as we return to the default state, where shouldSkip is false.
s.prevTok = emptyToken
}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go
new file mode 100644
index 00000000..d008ae27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go
@@ -0,0 +1,11 @@
+package strings
+
+import (
+ "strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
+func HasPrefixFold(s, prefix string) bool {
+ return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go
new file mode 100644
index 00000000..14ad0c58
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,120 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight
+
+import "sync"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // forgotten indicates whether Forget was called with this call's key
+ // while the call was still in flight.
+ forgotten bool
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+ c.val, c.err = fn()
+ c.wg.Done()
+
+ g.mu.Lock()
+ if !c.forgotten {
+ delete(g.m, key)
+ }
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ g.mu.Unlock()
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+ g.mu.Lock()
+ if c, ok := g.m[key]; ok {
+ c.forgotten = true
+ }
+ delete(g.m, key)
+ g.mu.Unlock()
+}
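
This is a vendored copy of the Go team's singleflight package, which collapses concurrent calls for the same key into a single execution. Because it lives under internal/ it cannot be imported by application code; the sketch below uses the public golang.org/x/sync/singleflight module, which exposes the same Do signature, purely to show the call pattern the SDK relies on:

package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/singleflight"
)

func main() {
    var g singleflight.Group
    var wg sync.WaitGroup
    calls := 0

    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Concurrent callers with the same key share one execution of fn.
            v, _, shared := g.Do("refresh-creds", func() (interface{}, error) {
                calls++
                return "fresh-credentials", nil
            })
            fmt.Println(v, "shared:", shared)
        }()
    }
    wg.Wait()
    fmt.Println("underlying executions:", calls) // typically 1
}
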
diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go
new file mode 100644
index 00000000..e045f38d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go
@@ -0,0 +1,53 @@
+package checksum
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func AddBodyContentMD5Handler(r *request.Request) {
+ // if Content-MD5 header is already present, return
+ if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 {
+ return
+ }
+
+ // if S3DisableContentMD5Validation flag is set, return
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+
+ // if request is presigned, return
+ if r.IsPresigned() {
+ return
+ }
+
+ // if body is not seekable, return
+ if !aws.IsReaderSeekable(r.Body) {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log(fmt.Sprintf(
+ "Unable to compute Content-MD5 for unseekable body, S3.%s",
+ r.Operation.Name))
+ }
+ return
+ }
+
+ h := md5.New()
+
+ if _, err := aws.CopySeekableBody(h, r.Body); err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
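
AddBodyContentMD5Handler is a build-phase handler: it skips presigned requests, unseekable bodies, and clients with S3DisableContentMD5Validation set, and otherwise writes the base64-encoded MD5 of the body into the Content-Md5 header. The S3 client attaches it internally for the operations that need it; the sketch below only shows what manual wiring would look like, against a hypothetical bucket and key, building the request locally without sending it:

package main

import (
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/private/checksum"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := s3.New(sess)

    // Hypothetical bucket and key; the request is built locally, never sent.
    req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("example-key"),
        Body:   strings.NewReader("hello"),
    })
    req.Handlers.Build.PushBack(checksum.AddBodyContentMD5Handler)

    if err := req.Build(); err != nil {
        fmt.Println("build error:", err)
        return
    }
    fmt.Println("Content-Md5:", req.HTTPRequest.Header.Get("Content-Md5"))
}
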
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
index ecc7bf82..15105497 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
@@ -101,7 +101,7 @@ func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
}
headers.Set(h.Name, value)
}
- (*hs) = decodedHeaders(headers)
+ *hs = decodedHeaders(headers)
return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
index 4b972b2d..47433939 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
@@ -21,10 +21,24 @@ type Decoder struct {
// NewDecoder initializes and returns a Decoder for decoding event
// stream messages from the reader provided.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{
+func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder {
+ d := &Decoder{
r: r,
}
+
+ for _, opt := range opts {
+ opt(d)
+ }
+
+ return d
+}
+
+// DecodeWithLogger adds a logger to be used by the decoder when decoding
+// stream events.
+func DecodeWithLogger(logger aws.Logger) func(*Decoder) {
+ return func(d *Decoder) {
+ d.logger = logger
+ }
}
// Decode attempts to decode a single message from the event stream reader.
@@ -40,6 +54,15 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
}()
}
+ m, err = Decode(reader, payloadBuf)
+
+ return m, err
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// Will return the event stream message, or error if Decode fails to read
+// the message from the reader.
+func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
crc := crc32.New(crc32IEEETable)
hashReader := io.TeeReader(reader, crc)
@@ -72,12 +95,6 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
return m, nil
}
-// UseLogger specifies the Logger that that the decoder should use to log the
-// message decode to.
-func (d *Decoder) UseLogger(logger aws.Logger) {
- d.logger = logger
-}
-
func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
w := bytes.NewBuffer(nil)
defer func() { logger.Log(w.String()) }()
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
index 150a6098..ffade3bc 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
@@ -3,61 +3,107 @@ package eventstream
import (
"bytes"
"encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
"hash"
"hash/crc32"
"io"
+
+ "github.com/aws/aws-sdk-go/aws"
)
// Encoder provides EventStream message encoding.
type Encoder struct {
- w io.Writer
+ w io.Writer
+ logger aws.Logger
headersBuf *bytes.Buffer
}
// NewEncoder initializes and returns an Encoder to encode Event Stream
// messages to an io.Writer.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
+func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder {
+ e := &Encoder{
w: w,
headersBuf: bytes.NewBuffer(nil),
}
+
+ for _, opt := range opts {
+ opt(e)
+ }
+
+ return e
+}
+
+// EncodeWithLogger adds a logger to be used by the encoder when encoding
+// stream events.
+func EncodeWithLogger(logger aws.Logger) func(*Encoder) {
+ return func(d *Encoder) {
+ d.logger = logger
+ }
}
// Encode encodes a single EventStream message to the io.Writer the Encoder
// was created with. An error is returned if writing the message fails.
-func (e *Encoder) Encode(msg Message) error {
+func (e *Encoder) Encode(msg Message) (err error) {
e.headersBuf.Reset()
- err := encodeHeaders(e.headersBuf, msg.Headers)
- if err != nil {
+ writer := e.w
+ if e.logger != nil {
+ encodeMsgBuf := bytes.NewBuffer(nil)
+ writer = io.MultiWriter(writer, encodeMsgBuf)
+ defer func() {
+ logMessageEncode(e.logger, encodeMsgBuf, msg, err)
+ }()
+ }
+
+ if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil {
return err
}
crc := crc32.New(crc32IEEETable)
- hashWriter := io.MultiWriter(e.w, crc)
+ hashWriter := io.MultiWriter(writer, crc)
headersLen := uint32(e.headersBuf.Len())
payloadLen := uint32(len(msg.Payload))
- if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
+ if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
return err
}
if headersLen > 0 {
- if _, err := io.Copy(hashWriter, e.headersBuf); err != nil {
+ if _, err = io.Copy(hashWriter, e.headersBuf); err != nil {
return err
}
}
if payloadLen > 0 {
- if _, err := hashWriter.Write(msg.Payload); err != nil {
+ if _, err = hashWriter.Write(msg.Payload); err != nil {
return err
}
}
msgCRC := crc.Sum32()
- return binary.Write(e.w, binary.BigEndian, msgCRC)
+ return binary.Write(writer, binary.BigEndian, msgCRC)
+}
+
+func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
+ w := bytes.NewBuffer(nil)
+ defer func() { logger.Log(w.String()) }()
+
+ fmt.Fprintf(w, "Message to encode:\n")
+ encoder := json.NewEncoder(w)
+ if err := encoder.Encode(msg); err != nil {
+ fmt.Fprintf(w, "Failed to get encoded message, %v\n", err)
+ }
+
+ if encodeErr != nil {
+ fmt.Fprintf(w, "Encode error: %v\n", encodeErr)
+ return
+ }
+
+ fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes()))
}
func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
@@ -86,7 +132,9 @@ func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32)
return nil
}
-func encodeHeaders(w io.Writer, headers Headers) error {
+// EncodeHeaders writes the header values to the writer encoded in the event
+// stream format. Returns an error if a header fails to encode.
+func EncodeHeaders(w io.Writer, headers Headers) error {
for _, h := range headers {
hn := headerName{
Len: uint8(len(h.Name)),
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
index 5ea5a988..34c2e89d 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
@@ -1,6 +1,9 @@
package eventstreamapi
-import "fmt"
+import (
+ "fmt"
+ "sync"
+)
type messageError struct {
code string
@@ -22,3 +25,53 @@ func (e messageError) Error() string {
func (e messageError) OrigErr() error {
return nil
}
+
+// OnceError wraps the behavior of recording an error
+// once and signaling on a channel when this has occurred.
+// Signaling is done by closing the channel.
+//
+// Type is safe for concurrent usage.
+type OnceError struct {
+ mu sync.RWMutex
+ err error
+ ch chan struct{}
+}
+
+// NewOnceError returns a new OnceError.
+func NewOnceError() *OnceError {
+ return &OnceError{
+ ch: make(chan struct{}, 1),
+ }
+}
+
+// Err acquires a read-lock and returns an
+// error if one has been set.
+func (e *OnceError) Err() error {
+ e.mu.RLock()
+ err := e.err
+ e.mu.RUnlock()
+
+ return err
+}
+
+// SetError acquires a write-lock and will set
+// the underlying error value if one has not been set.
+func (e *OnceError) SetError(err error) {
+ if err == nil {
+ return
+ }
+
+ e.mu.Lock()
+ if e.err == nil {
+ e.err = err
+ close(e.ch)
+ }
+ e.mu.Unlock()
+}
+
+// ErrorSet returns a channel that will be used to signal
+// that an error has been set. This channel will be closed
+// when the error value has been set for OnceError.
+func (e *OnceError) ErrorSet() <-chan struct{} {
+ return e.ch
+}
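
OnceError records only the first error and closes its channel to broadcast that an error exists, which is how the stream writer introduced later in this change signals failures to concurrent senders. A minimal sketch of the observe-then-set pattern:

package main

import (
    "errors"
    "fmt"

    "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

func main() {
    oe := eventstreamapi.NewOnceError()

    done := make(chan struct{})
    go func() {
        <-oe.ErrorSet() // closed exactly once, when the first error is recorded
        fmt.Println("observed:", oe.Err())
        close(done)
    }()

    oe.SetError(errors.New("first failure"))
    oe.SetError(errors.New("ignored, an error is already set"))
    <-done
}
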
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
similarity index 76%
rename from vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
rename to vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
index 97937c8e..0e4aa42f 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
@@ -2,9 +2,7 @@ package eventstreamapi
import (
"fmt"
- "io"
- "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)
@@ -15,27 +13,8 @@ type Unmarshaler interface {
UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
}
-// EventStream headers with specific meaning to async API functionality.
-const (
- MessageTypeHeader = `:message-type` // Identifies type of message.
- EventMessageType = `event`
- ErrorMessageType = `error`
- ExceptionMessageType = `exception`
-
- // Message Events
- EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
-
- // Message Error
- ErrorCodeHeader = `:error-code`
- ErrorMessageHeader = `:error-message`
-
- // Message Exception
- ExceptionTypeHeader = `:exception-type`
-)
-
// EventReader provides reading from the EventStream of an reader.
type EventReader struct {
- reader io.ReadCloser
decoder *eventstream.Decoder
unmarshalerForEventType func(string) (Unmarshaler, error)
@@ -47,27 +26,18 @@ type EventReader struct {
// NewEventReader returns a EventReader built from the reader and unmarshaler
// provided. Use ReadStream method to start reading from the EventStream.
func NewEventReader(
- reader io.ReadCloser,
+ decoder *eventstream.Decoder,
payloadUnmarshaler protocol.PayloadUnmarshaler,
unmarshalerForEventType func(string) (Unmarshaler, error),
) *EventReader {
return &EventReader{
- reader: reader,
- decoder: eventstream.NewDecoder(reader),
+ decoder: decoder,
payloadUnmarshaler: payloadUnmarshaler,
unmarshalerForEventType: unmarshalerForEventType,
payloadBuf: make([]byte, 10*1024),
}
}
-// UseLogger instructs the EventReader to use the logger and log level
-// specified.
-func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
- if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
- r.decoder.UseLogger(logger)
- }
-}
-
// ReadEvent attempts to read a message from the EventStream and return the
// unmarshaled event value that the message is for.
//
@@ -95,15 +65,27 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) {
case EventMessageType:
return r.unmarshalEventMessage(msg)
case ExceptionMessageType:
- err = r.unmarshalEventException(msg)
- return nil, err
+ return nil, r.unmarshalEventException(msg)
case ErrorMessageType:
return nil, r.unmarshalErrorMessage(msg)
default:
- return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
+ return nil, &UnknownMessageTypeError{
+ Type: typ, Message: msg.Clone(),
+ }
}
}
+// UnknownMessageTypeError provides an error when a message is received from
+// the stream, but the reader is unable to determine what kind of message it is.
+type UnknownMessageTypeError struct {
+ Type string
+ Message eventstream.Message
+}
+
+func (e *UnknownMessageTypeError) Error() string {
+ return "unknown eventstream message type, " + e.Type
+}
+
func (r *EventReader) unmarshalEventMessage(
msg eventstream.Message,
) (event interface{}, err error) {
@@ -174,11 +156,6 @@ func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error)
return msgErr
}
-// Close closes the EventReader's EventStream reader.
-func (r *EventReader) Close() error {
- return r.reader.Close()
-}
-
// GetHeaderString returns the value of the header as a string. If the header
// is not set or the value is not a string an error will be returned.
func GetHeaderString(msg eventstream.Message, headerName string) (string, error) {
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go
new file mode 100644
index 00000000..e46b8acc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go
@@ -0,0 +1,23 @@
+package eventstreamapi
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+ ChunkSignatureHeader = `:chunk-signature` // chunk signature for message
+ DateHeader = `:date` // Date header for signature
+
+ // Message header and values
+ MessageTypeHeader = `:message-type` // Identifies type of message.
+ EventMessageType = `event`
+ ErrorMessageType = `error`
+ ExceptionMessageType = `exception`
+
+ // Message Events
+ EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+ // Message Error
+ ErrorCodeHeader = `:error-code`
+ ErrorMessageHeader = `:error-message`
+
+ // Message Exception
+ ExceptionTypeHeader = `:exception-type`
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
new file mode 100644
index 00000000..3a7ba5cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
@@ -0,0 +1,123 @@
+package eventstreamapi
+
+import (
+ "bytes"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+var timeNow = time.Now
+
+// StreamSigner defines an interface for signing event stream payloads.
+type StreamSigner interface {
+ GetSignature(headers, payload []byte, date time.Time) ([]byte, error)
+}
+
+// SignEncoder envelopes event stream messages
+// into an event stream message payload with included
+// signature headers using the provided signer and encoder.
+type SignEncoder struct {
+ signer StreamSigner
+ encoder Encoder
+ bufEncoder *BufferEncoder
+
+ closeErr error
+ closed bool
+}
+
+// NewSignEncoder returns a new SignEncoder using the provided stream signer and
+// event stream encoder.
+func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder {
+ // TODO: Need to pass down logging
+
+ return &SignEncoder{
+ signer: signer,
+ encoder: encoder,
+ bufEncoder: NewBufferEncoder(),
+ }
+}
+
+// Close encodes a final event stream signing envelope with an empty event stream
+// payload. This final end-frame is used to mark the conclusion of the stream.
+func (s *SignEncoder) Close() error {
+ if s.closed {
+ return s.closeErr
+ }
+
+ if err := s.encode([]byte{}); err != nil {
+ if strings.Contains(err.Error(), "on closed pipe") {
+ return nil
+ }
+
+ s.closeErr = err
+ s.closed = true
+ return s.closeErr
+ }
+
+ return nil
+}
+
+// Encode takes the provided message and envelopes it
+// with the required signature.
+func (s *SignEncoder) Encode(msg eventstream.Message) error {
+ payload, err := s.bufEncoder.Encode(msg)
+ if err != nil {
+ return err
+ }
+
+ return s.encode(payload)
+}
+
+func (s SignEncoder) encode(payload []byte) error {
+ date := timeNow()
+
+ var msg eventstream.Message
+ msg.Headers.Set(DateHeader, eventstream.TimestampValue(date))
+ msg.Payload = payload
+
+ var headers bytes.Buffer
+ if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil {
+ return err
+ }
+
+ sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date)
+ if err != nil {
+ return err
+ }
+
+ msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig))
+
+ return s.encoder.Encode(msg)
+}
+
+// BufferEncoder is a utility that provides a buffered
+// event stream encoder
+type BufferEncoder struct {
+ encoder Encoder
+ buffer *bytes.Buffer
+}
+
+// NewBufferEncoder returns a new BufferEncoder initialized
+// with a 1024 byte buffer.
+func NewBufferEncoder() *BufferEncoder {
+ buf := bytes.NewBuffer(make([]byte, 1024))
+ return &BufferEncoder{
+ encoder: eventstream.NewEncoder(buf),
+ buffer: buf,
+ }
+}
+
+// Encode returns the encoded message as a byte slice.
+// The returned byte slice will be modified on the next encode call
+// and should not be held onto.
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) {
+ e.buffer.Reset()
+
+ if err := e.encoder.Encode(msg); err != nil {
+ return nil, err
+ }
+
+ return e.buffer.Bytes(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go
new file mode 100644
index 00000000..433bb163
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go
@@ -0,0 +1,129 @@
+package eventstreamapi
+
+import (
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// StreamWriter provides concurrent safe writing to an event stream.
+type StreamWriter struct {
+ eventWriter *EventWriter
+ stream chan eventWriteAsyncReport
+
+ done chan struct{}
+ closeOnce sync.Once
+ err *OnceError
+
+ streamCloser io.Closer
+}
+
+// NewStreamWriter returns a StreamWriter for the event writer, and stream
+// closer provided.
+func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter {
+ w := &StreamWriter{
+ eventWriter: eventWriter,
+ streamCloser: streamCloser,
+ stream: make(chan eventWriteAsyncReport),
+ done: make(chan struct{}),
+ err: NewOnceError(),
+ }
+ go w.writeStream()
+
+ return w
+}
+
+// Close terminates the writer's ability to write new events to the stream. Any
+// future call to Send will fail with an error.
+func (w *StreamWriter) Close() error {
+ w.closeOnce.Do(w.safeClose)
+ return w.Err()
+}
+
+func (w *StreamWriter) safeClose() {
+ close(w.done)
+}
+
+// ErrorSet returns a channel which will be closed
+// if an error occurs.
+func (w *StreamWriter) ErrorSet() <-chan struct{} {
+ return w.err.ErrorSet()
+}
+
+// Err returns any error that occurred while attempting to write an event to the
+// stream.
+func (w *StreamWriter) Err() error {
+ return w.err.Err()
+}
+
+// Send writes a single event to the stream returning an error if the write
+// failed.
+//
+// Send may be called concurrently. Events will be written to the stream
+// safely.
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error {
+ if err := w.Err(); err != nil {
+ return err
+ }
+
+ resultCh := make(chan error)
+ wrapped := eventWriteAsyncReport{
+ Event: event,
+ Result: resultCh,
+ }
+
+ select {
+ case w.stream <- wrapped:
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-w.done:
+ return fmt.Errorf("stream closed, unable to send event")
+ }
+
+ select {
+ case err := <-resultCh:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-w.done:
+ return fmt.Errorf("stream closed, unable to send event")
+ }
+}
+
+func (w *StreamWriter) writeStream() {
+ defer w.Close()
+
+ for {
+ select {
+ case wrapper := <-w.stream:
+ err := w.eventWriter.WriteEvent(wrapper.Event)
+ wrapper.ReportResult(w.done, err)
+ if err != nil {
+ w.err.SetError(err)
+ return
+ }
+
+ case <-w.done:
+ if err := w.streamCloser.Close(); err != nil {
+ w.err.SetError(err)
+ }
+ return
+ }
+ }
+}
+
+type eventWriteAsyncReport struct {
+ Event Marshaler
+ Result chan<- error
+}
+
+func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool {
+ select {
+ case e.Result <- err:
+ return true
+ case <-cancel:
+ return false
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
new file mode 100644
index 00000000..10a3823d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
@@ -0,0 +1,109 @@
+package eventstreamapi
+
+import (
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Marshaler provides a marshaling interface for event types to event stream
+// messages.
+type Marshaler interface {
+ MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error)
+}
+
+// Encoder is a stream encoder that will encode an event stream message for
+// the transport.
+type Encoder interface {
+ Encode(eventstream.Message) error
+}
+
+// EventWriter provides a wrapper around the underlying event stream encoder
+// for an io.WriteCloser.
+type EventWriter struct {
+ encoder Encoder
+ payloadMarshaler protocol.PayloadMarshaler
+ eventTypeFor func(Marshaler) (string, error)
+}
+
+// NewEventWriter returns a new event stream writer that will write to the
+// writer provided. Use the WriteEvent method to write an event to the stream.
+func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error),
+) *EventWriter {
+ return &EventWriter{
+ encoder: encoder,
+ payloadMarshaler: pm,
+ eventTypeFor: eventTypeFor,
+ }
+}
+
+// WriteEvent writes an event to the stream. Returns an error if the event
+// fails to marshal into a message, or writing to the underlying writer fails.
+func (w *EventWriter) WriteEvent(event Marshaler) error {
+ msg, err := w.marshal(event)
+ if err != nil {
+ return err
+ }
+
+ return w.encoder.Encode(msg)
+}
+
+func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) {
+ eventType, err := w.eventTypeFor(event)
+ if err != nil {
+ return eventstream.Message{}, err
+ }
+
+ msg, err := event.MarshalEvent(w.payloadMarshaler)
+ if err != nil {
+ return eventstream.Message{}, err
+ }
+
+ msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType))
+ return msg, nil
+}
+
+//type EventEncoder struct {
+// encoder Encoder
+// ppayloadMarshaler protocol.PayloadMarshaler
+// eventTypeFor func(Marshaler) (string, error)
+//}
+//
+//func (e EventEncoder) Encode(event Marshaler) error {
+// msg, err := e.marshal(event)
+// if err != nil {
+// return err
+// }
+//
+// return w.encoder.Encode(msg)
+//}
+//
+//func (e EventEncoder) marshal(event Marshaler) (eventstream.Message, error) {
+// eventType, err := w.eventTypeFor(event)
+// if err != nil {
+// return eventstream.Message{}, err
+// }
+//
+// msg, err := event.MarshalEvent(w.payloadMarshaler)
+// if err != nil {
+// return eventstream.Message{}, err
+// }
+//
+// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType))
+// return msg, nil
+//}
+//
+//func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) {
+// eventType, err := w.eventTypeFor(event)
+// if err != nil {
+// return eventstream.Message{}, err
+// }
+//
+// msg, err := event.MarshalEvent(w.payloadMarshaler)
+// if err != nil {
+// return eventstream.Message{}, err
+// }
+//
+// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType))
+// return msg, nil
+//}
+//
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
index 3b44dde2..f6f8c567 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
@@ -52,6 +52,15 @@ func (hs *Headers) Del(name string) {
}
}
+// Clone returns a deep copy of the headers
+func (hs Headers) Clone() Headers {
+ o := make(Headers, 0, len(hs))
+ for _, h := range hs {
+ o.Set(h.Name, h.Value)
+ }
+ return o
+}
+
func decodeHeaders(r io.Reader) (Headers, error) {
hs := Headers{}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
index e3fc0766..9f509d8f 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
@@ -461,6 +461,11 @@ func (v *TimestampValue) decode(r io.Reader) error {
return nil
}
+// MarshalJSON implements the json.Marshaler interface
+func (v TimestampValue) MarshalJSON() ([]byte, error) {
+ return []byte(v.String()), nil
+}
+
func timeFromEpochMilli(t int64) time.Time {
secs := t / 1e3
msec := t % 1e3
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
index 2dc012a6..f7427da0 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
@@ -27,7 +27,7 @@ func (m *Message) rawMessage() (rawMessage, error) {
if len(m.Headers) > 0 {
var headers bytes.Buffer
- if err := encodeHeaders(&headers, m.Headers); err != nil {
+ if err := EncodeHeaders(&headers, m.Headers); err != nil {
return rawMessage{}, err
}
raw.Headers = headers.Bytes()
@@ -57,6 +57,20 @@ func (m *Message) rawMessage() (rawMessage, error) {
return raw, nil
}
+// Clone returns a deep copy of the message.
+func (m Message) Clone() Message {
+ var payload []byte
+ if m.Payload != nil {
+ payload = make([]byte, len(m.Payload))
+ copy(payload, m.Payload)
+ }
+
+ return Message{
+ Headers: m.Headers.Clone(),
+ Payload: payload,
+ }
+}
+
type messagePrelude struct {
Length uint32
HeadersLen uint32
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
index ea0da79a..5e949969 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"reflect"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -45,10 +46,31 @@ func UnmarshalJSON(v interface{}, stream io.Reader) error {
return err
}
- return unmarshalAny(reflect.ValueOf(v), out, "")
+ return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
}
-func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the
+// object v. Ignores casing for structure members.
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error {
+ var out interface{}
+
+ err := json.NewDecoder(stream).Decode(&out)
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ return unmarshaler{
+ caseInsensitive: true,
+ }.unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+type unmarshaler struct {
+ caseInsensitive bool
+}
+
+func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
vtype := value.Type()
if vtype.Kind() == reflect.Ptr {
vtype = vtype.Elem() // check kind of actual element type
@@ -80,17 +102,17 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag)
if field, ok := vtype.FieldByName("_"); ok {
tag = field.Tag
}
- return unmarshalStruct(value, data, tag)
+ return u.unmarshalStruct(value, data, tag)
case "list":
- return unmarshalList(value, data, tag)
+ return u.unmarshalList(value, data, tag)
case "map":
- return unmarshalMap(value, data, tag)
+ return u.unmarshalMap(value, data, tag)
default:
- return unmarshalScalar(value, data, tag)
+ return u.unmarshalScalar(value, data, tag)
}
}
-func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
if data == nil {
return nil
}
@@ -114,7 +136,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa
// unwrap any payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := t.FieldByName(payload)
- return unmarshalAny(value.FieldByName(payload), data, field.Tag)
+ return u.unmarshalAny(value.FieldByName(payload), data, field.Tag)
}
for i := 0; i < t.NumField(); i++ {
@@ -128,9 +150,19 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa
if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
+ if u.caseInsensitive {
+ if _, ok := mapData[name]; !ok {
+ // Fall back to a case-insensitive name search if the exact name didn't match.
+ for kn, v := range mapData {
+ if strings.EqualFold(kn, name) {
+ mapData[name] = v
+ }
+ }
+ }
+ }
member := value.FieldByIndex(field.Index)
- err := unmarshalAny(member, mapData[name], field.Tag)
+ err := u.unmarshalAny(member, mapData[name], field.Tag)
if err != nil {
return err
}
@@ -138,7 +170,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa
return nil
}
-func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
if data == nil {
return nil
}
@@ -153,7 +185,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag)
}
for i, c := range listData {
- err := unmarshalAny(value.Index(i), c, "")
+ err := u.unmarshalAny(value.Index(i), c, "")
if err != nil {
return err
}
@@ -162,7 +194,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag)
return nil
}
-func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
if data == nil {
return nil
}
@@ -179,14 +211,14 @@ func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag)
kvalue := reflect.ValueOf(k)
vvalue := reflect.New(value.Type().Elem()).Elem()
- unmarshalAny(vvalue, v, "")
+ u.unmarshalAny(vvalue, v, "")
value.SetMapIndex(kvalue, vvalue)
}
return nil
}
-func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
switch d := data.(type) {
case nil:
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
index e21614a1..0ea0647a 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -64,7 +64,7 @@ func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error
metadata.ClientInfo{},
request.Handlers{},
nil,
- &request.Operation{HTTPMethod: "GET"},
+ &request.Operation{HTTPMethod: "PUT"},
v,
nil,
)
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
new file mode 100644
index 00000000..9d521dcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
@@ -0,0 +1,49 @@
+package protocol
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequireHTTPMinProtocol request handler is used to enforce that
+// the target endpoint supports the given major and minor HTTP protocol version.
+type RequireHTTPMinProtocol struct {
+ Major, Minor int
+}
+
+// Handler will mark the request.Request with an error if the
+// target endpoint did not connect with the required HTTP protocol
+// major and minor version.
+func (p RequireHTTPMinProtocol) Handler(r *request.Request) {
+ if r.Error != nil || r.HTTPResponse == nil {
+ return
+ }
+
+ if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") {
+ r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+ }
+
+ if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor {
+ r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+ }
+}
+
+// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint
+// did not match the required HTTP major and minor protocol version.
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError"
+
+func newMinHTTPProtoError(major, minor int, r *request.Request) error {
+ return awserr.NewRequestFailure(
+ awserr.New("MinimumHTTPProtocolError",
+ fmt.Sprintf(
+ "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+ major, minor, r.HTTPResponse.Proto,
+ ),
+ nil,
+ ),
+ r.HTTPResponse.StatusCode, r.RequestID,
+ )
+}
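
RequireHTTPMinProtocol is a response handler that turns an insufficient negotiated HTTP version into a MinimumHTTPProtocolError request failure. A hedged sketch of how such a handler could be attached to a client's Unmarshal handler list; the Kinesis client and handler name here are illustrative only, since the SDK's generated event stream code wires this up itself:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/private/protocol"
    "github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := kinesis.New(sess)

    // Hypothetical wiring: fail any response that did not negotiate HTTP/2,
    // mirroring how event stream operations guard their transport.
    svc.Handlers.Unmarshal.PushFrontNamed(request.NamedHandler{
        Name: "RequireHTTP2",
        Fn:   protocol.RequireHTTPMinProtocol{Major: 2}.Handler,
    })
}
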
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 0cb99eb5..d40346a7 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -1,7 +1,7 @@
// Package query provides serialization of AWS query requests, and responses.
package query
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go
import (
"net/url"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
index f69c1efc..9231e95d 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -1,6 +1,6 @@
package query
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go
import (
"encoding/xml"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 74e361e0..92f8b4d9 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -15,6 +15,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
+ awsStrings "github.com/aws/aws-sdk-go/internal/strings"
"github.com/aws/aws-sdk-go/private/protocol"
)
@@ -28,7 +29,9 @@ var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta
func Unmarshal(r *request.Request) {
if r.DataFilled() {
v := reflect.Indirect(reflect.ValueOf(r.Data))
- unmarshalBody(r, v)
+ if err := unmarshalBody(r, v); err != nil {
+ r.Error = err
+ }
}
}
@@ -40,12 +43,21 @@ func UnmarshalMeta(r *request.Request) {
r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
}
if r.DataFilled() {
- v := reflect.Indirect(reflect.ValueOf(r.Data))
- unmarshalLocationElements(r, v)
+ if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil {
+ r.Error = err
+ }
}
}
-func unmarshalBody(r *request.Request, v reflect.Value) {
+// UnmarshalResponse attempts to unmarshal the REST response headers to
+// the data type passed in. The type must be a pointer. An error is returned
+// if the response cannot be unmarshaled into the target datatype.
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {
+ v := reflect.Indirect(reflect.ValueOf(data))
+ return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) error {
if field, ok := v.Type().FieldByName("_"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
@@ -57,35 +69,38 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- } else {
- payload.Set(reflect.ValueOf(b))
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
}
+
+ payload.Set(reflect.ValueOf(b))
+
case *string:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- } else {
- str := string(b)
- payload.Set(reflect.ValueOf(&str))
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
}
+
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+
default:
switch payload.Type().String() {
case "io.ReadCloser":
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+
case "io.ReadSeeker":
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization,
+ return awserr.New(request.ErrCodeSerialization,
"failed to read response body", err)
- return
}
payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+
default:
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
- defer r.HTTPResponse.Body.Close()
- r.Error = awserr.New(request.ErrCodeSerialization,
+ r.HTTPResponse.Body.Close()
+ return awserr.New(request.ErrCodeSerialization,
"failed to decode REST response",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
@@ -94,9 +109,11 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
}
}
}
+
+ return nil
}
-func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error {
for i := 0; i < v.NumField(); i++ {
m, field := v.Field(i), v.Type().Field(i)
if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
@@ -111,26 +128,25 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) {
switch field.Tag.Get("location") {
case "statusCode":
- unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ unmarshalStatusCode(m, resp.StatusCode)
+
case "header":
- err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ err := unmarshalHeader(m, resp.Header.Get(name), field.Tag)
if err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- break
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
}
+
case "headers":
prefix := field.Tag.Get("locationName")
- err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps)
if err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- break
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
}
}
}
- if r.Error != nil {
- return
- }
}
+
+ return nil
}
func unmarshalStatusCode(v reflect.Value, statusCode int) {
@@ -145,7 +161,7 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) {
}
}
-func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error {
if len(headers) == 0 {
return nil
}
@@ -153,8 +169,12 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err
case map[string]*string: // we only support string map value types
out := map[string]*string{}
for k, v := range headers {
- k = http.CanonicalHeaderKey(k)
- if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ if awsStrings.HasPrefixFold(k, prefix) {
+ if normalize == true {
+ k = strings.ToLower(k)
+ } else {
+ k = http.CanonicalHeaderKey(k)
+ }
out[k[len(prefix):]] = &v[0]
}
}
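
The rest unmarshaler above now returns errors to its caller instead of assigning them to r.Error, and unmarshalHeaderMap gained a normalize flag together with case-insensitive prefix matching through awsStrings.HasPrefixFold. The standalone sketch below is not SDK code; hasPrefixFold and collectPrefixedHeaders are illustrative names. It only shows the observable effect of the new key handling: prefixes match regardless of case, and the caller chooses between lower-cased and canonical header keys in the resulting map.

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    // hasPrefixFold reports whether s begins with prefix, ignoring case; this is
    // what the vendored awsStrings.HasPrefixFold helper is used for above.
    func hasPrefixFold(s, prefix string) bool {
        return len(s) >= len(prefix) && strings.EqualFold(s[:len(prefix)], prefix)
    }

    // collectPrefixedHeaders gathers headers sharing the prefix, strips the
    // prefix, and normalizes the key per the lowerCase flag.
    func collectPrefixedHeaders(h http.Header, prefix string, lowerCase bool) map[string]string {
        out := map[string]string{}
        for k, v := range h {
            if !hasPrefixFold(k, prefix) || len(v) == 0 {
                continue
            }
            if lowerCase {
                k = strings.ToLower(k)
            } else {
                k = http.CanonicalHeaderKey(k)
            }
            out[k[len(prefix):]] = v[0]
        }
        return out
    }

    func main() {
        h := http.Header{}
        h.Set("X-Amz-Meta-Owner", "team-a")
        h.Set("X-Amz-Meta-Env", "prod")

        fmt.Println(collectPrefixedHeaders(h, "X-Amz-Meta-", false)) // map[Env:prod Owner:team-a]
        fmt.Println(collectPrefixedHeaders(h, "x-amz-meta-", true))  // map[env:prod owner:team-a]
    }
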
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
index 07a6187e..b1ae3648 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -2,8 +2,8 @@
// requests and responses.
package restxml
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
import (
"bytes"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
index 05d4ff51..d2f6dae5 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -56,7 +56,8 @@ func FormatTime(name string, t time.Time) string {
case ISO8601TimeFormatName:
return t.Format(ISO8601OutputTimeFormat)
case UnixTimeFormatName:
- return strconv.FormatInt(t.Unix(), 10)
+ ms := t.UnixNano() / int64(time.Millisecond)
+ return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
default:
panic("unknown timestamp format name, " + name)
}
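
FormatTime's handling of the unix timestamp format changes from whole seconds to millisecond precision. A quick standalone comparison of the two expressions used before and after the change (the chosen instant is arbitrary):

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    func main() {
        t := time.Unix(1583858339, 123_000_000) // arbitrary instant with 123 ms of sub-second time

        before := strconv.FormatInt(t.Unix(), 10) // old behavior: "1583858339"

        ms := t.UnixNano() / int64(time.Millisecond)
        after := strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) // new behavior: "1583858339.123"

        fmt.Println(before, after)
    }
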
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
index da1a6811..f614ef89 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -19,3 +19,9 @@ func UnmarshalDiscardBody(r *request.Request) {
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
r.HTTPResponse.Body.Close()
}
+
+// ResponseMetadata provides the SDK response metadata attributes.
+type ResponseMetadata struct {
+ StatusCode int
+ RequestID string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
new file mode 100644
index 00000000..cc857f13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
@@ -0,0 +1,65 @@
+package protocol
+
+import (
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalErrorHandler provides unmarshaling errors API response errors for
+// both typed and untyped errors.
+type UnmarshalErrorHandler struct {
+ unmarshaler ErrorUnmarshaler
+}
+
+// ErrorUnmarshaler is an abstract interface for concrete implementations to
+// unmarshal protocol specific response errors.
+type ErrorUnmarshaler interface {
+ UnmarshalError(*http.Response, ResponseMetadata) (error, error)
+}
+
+// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler
+// initialized for the set of exception names to the error unmarshalers
+func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler {
+ return &UnmarshalErrorHandler{
+ unmarshaler: unmarshaler,
+ }
+}
+
+// UnmarshalErrorHandlerName is the name of the named handler.
+const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError"
+
+// NamedHandler returns a NamedHandler for the unmarshaler using the set of
+// errors the unmarshaler was initialized for.
+func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler {
+ return request.NamedHandler{
+ Name: UnmarshalErrorHandlerName,
+ Fn: u.UnmarshalError,
+ }
+}
+
+// UnmarshalError will attempt to unmarshal the API response's error message
+// into either a generic SDK error type, or a typed error corresponding to the
+// errors exception name.
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ respMeta := ResponseMetadata{
+ StatusCode: r.HTTPResponse.StatusCode,
+ RequestID: r.RequestID,
+ }
+
+ v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal response error", err),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ )
+ return
+ }
+
+ r.Error = v
+}
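
The new file funnels protocol-specific error decoding through a single ErrorUnmarshaler interface, with a shared handler that falls back to a generic serialization failure when decoding itself fails. The stripped-down, standard-library-only sketch below reproduces just that shape; the JSON error body, the jsonErrorUnmarshaler type, and the request ID value are illustrative assumptions, not the SDK's own wire format.

    package main

    import (
        "encoding/json"
        "fmt"
        "io/ioutil"
        "net/http"
        "strings"
    )

    type responseMetadata struct {
        StatusCode int
        RequestID  string
    }

    // errorUnmarshaler mirrors the two-error contract above: the first return
    // value is the decoded API error, the second reports a decoding failure.
    type errorUnmarshaler interface {
        UnmarshalError(*http.Response, responseMetadata) (error, error)
    }

    type jsonErrorUnmarshaler struct{}

    func (jsonErrorUnmarshaler) UnmarshalError(resp *http.Response, meta responseMetadata) (error, error) {
        var body struct {
            Code    string `json:"code"`
            Message string `json:"message"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
            return nil, err // decoding itself failed
        }
        return fmt.Errorf("%s: %s (status %d, request %s)",
            body.Code, body.Message, meta.StatusCode, meta.RequestID), nil
    }

    // unmarshalError plays the role of the shared handler: close the body, build
    // the metadata, delegate to the protocol unmarshaler, and wrap any failure.
    func unmarshalError(u errorUnmarshaler, resp *http.Response, reqID string) error {
        defer resp.Body.Close()
        meta := responseMetadata{StatusCode: resp.StatusCode, RequestID: reqID}
        apiErr, err := u.UnmarshalError(resp, meta)
        if err != nil {
            return fmt.Errorf("failed to unmarshal response error: %w", err)
        }
        return apiErr
    }

    func main() {
        resp := &http.Response{
            StatusCode: 404,
            Body:       ioutil.NopCloser(strings.NewReader(`{"code":"NoSuchBucket","message":"bucket not found"}`)),
        }
        fmt.Println(unmarshalError(jsonErrorUnmarshaler{}, resp, "req-123"))
    }
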
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index cf981fe9..09ad9515 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -8,6 +8,7 @@ import (
"reflect"
"sort"
"strconv"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/private/protocol"
@@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle
return nil
}
+ xml := tag.Get("xml")
+ if len(xml) != 0 {
+ name := strings.SplitAfterN(xml, ",", 2)[0]
+ if name == "-" {
+ return nil
+ }
+ }
+
t := tag.Get("type")
if t == "" {
switch value.Kind() {
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
index 7108d380..107c053f 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -64,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
// will be used to determine the type from r.
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ xml := tag.Get("xml")
+ if len(xml) != 0 {
+ name := strings.SplitAfterN(xml, ",", 2)[0]
+ if name == "-" {
+ return nil
+ }
+ }
+
rtype := r.Type()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem() // check kind of actual element type
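
Both xmlutil hunks above (buildValue and parse) make the SDK skip struct fields whose xml tag name is "-". That is the same convention encoding/xml already follows, so this standalone snippet only demonstrates the convention with the standard library to make the intent of the new checks clear; Widget is a made-up type.

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    type Widget struct {
        Name   string `xml:"Name"`
        Secret string `xml:"-"` // never serialized, never populated from XML
    }

    func main() {
        out, _ := xml.Marshal(Widget{Name: "w1", Secret: "do-not-send"})
        fmt.Println(string(out)) // <Widget><Name>w1</Name></Widget>

        var w Widget
        _ = xml.Unmarshal([]byte(`<Widget><Name>w2</Name><Secret>ignored</Secret></Widget>`), &w)
        fmt.Printf("%+v\n", w) // {Name:w2 Secret:}
    }
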
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index b4a4e8c4..8aadf3c5 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"sync"
- "sync/atomic"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -15,11 +14,13 @@ import (
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/checksum"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/eventstream"
"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
"github.com/aws/aws-sdk-go/private/protocol/rest"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
+ "github.com/aws/aws-sdk-go/service/s3/internal/arn"
)
const opAbortMultipartUpload = "AbortMultipartUpload"
@@ -66,11 +67,31 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
// AbortMultipartUpload API operation for Amazon Simple Storage Service.
//
-// Aborts a multipart upload.
+// This operation aborts a multipart upload. After a multipart upload is aborted,
+// no additional parts can be uploaded using that upload ID. The storage consumed
+// by any previously uploaded parts will be freed. However, if any part uploads
+// are currently in progress, those part uploads might or might not succeed.
+// As a result, it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
//
// To verify that all parts have been removed, so you don't get charged for
-// the part storage, you should call the List Parts operation and ensure the
-// parts list is empty.
+// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+// operation and ensure that the parts list is empty.
+//
+// For information about permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to AbortMultipartUpload:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -151,6 +172,65 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
//
// Completes a multipart upload by assembling previously uploaded parts.
//
+// You first initiate the multipart upload and then upload all parts using the
+// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// operation. After successfully uploading all relevant parts of an upload,
+// you call this operation to complete the upload. Upon receiving this request,
+// Amazon S3 concatenates all the parts in ascending order by part number to
+// create a new object. In the Complete Multipart Upload request, you must provide
+// the parts list. You must ensure that the parts list is complete. This operation
+// concatenates the parts that you provide in the list. For each part in the
+// list, you must provide the part number and the ETag value, returned after
+// that part was uploaded.
+//
+// Processing of a Complete Multipart Upload request could take several minutes
+// to complete. After Amazon S3 begins processing the request, it sends an HTTP
+// response header that specifies a 200 OK response. While processing is in
+// progress, Amazon S3 periodically sends white space characters to keep the
+// connection from timing out. Because a request could fail after the initial
+// 200 OK response has been sent, it is important that you check the response
+// body to determine whether the request succeeded.
+//
+// Note that if CompleteMultipartUpload fails, applications should be prepared
+// to retry the failed requests. For more information, see Amazon S3 Error Best
+// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html).
+//
+// For more information about multipart uploads, see Uploading Objects Using
+// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information about permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// CompleteMultipartUpload has the following special errors:
+//
+// * Error code: EntityTooSmall Description: Your proposed upload is smaller
+// than the minimum allowed object size. Each part must be at least 5 MB
+// in size, except the last part. 400 Bad Request
+//
+// * Error code: InvalidPart Description: One or more of the specified parts
+// could not be found. The part might not have been uploaded, or the specified
+// entity tag might not have matched the part's entity tag. 400 Bad Request
+//
+// * Error code: InvalidPartOrder Description: The list of parts was not
+// in ascending order. The parts list must be specified in order by part
+// number. 400 Bad Request
+//
+// * Error code: NoSuchUpload Description: The specified multipart upload
+// does not exist. The upload ID might be invalid, or the multipart upload
+// might have been aborted or completed. 404 Not Found
+//
+// The following operations are related to CompleteMultipartUpload:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
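
The expanded doc comments above spell out the multipart contract: CreateMultipartUpload hands back an upload ID, every UploadPart response carries an ETag, and CompleteMultipartUpload needs the full part list in ascending part-number order. A minimal sketch of that flow with this SDK follows; the bucket and key are placeholders, error handling is abbreviated, and AbortMultipartUpload is called on failure so orphaned parts do not keep accruing storage charges.

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := s3.New(sess)
        bucket, key := aws.String("example-bucket"), aws.String("example-object") // placeholders

        // 1. Initiate the upload; UploadId ties all later calls together.
        mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
        if err != nil {
            log.Fatal(err)
        }

        // 2. Upload parts. A single tiny part keeps the sketch short; real parts
        //    must be at least 5 MB except the last one.
        part, err := svc.UploadPart(&s3.UploadPartInput{
            Bucket:     bucket,
            Key:        key,
            UploadId:   mpu.UploadId,
            PartNumber: aws.Int64(1),
            Body:       bytes.NewReader([]byte("hello multipart")),
        })
        if err != nil {
            // Free the already-stored parts rather than paying for them forever.
            svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{Bucket: bucket, Key: key, UploadId: mpu.UploadId})
            log.Fatal(err)
        }

        // 3. Complete with the ordered part list, pairing each part number with
        //    the ETag that UploadPart returned.
        out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
            Bucket:   bucket,
            Key:      key,
            UploadId: mpu.UploadId,
            MultipartUpload: &s3.CompletedMultipartUpload{
                Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("assembled object at", aws.StringValue(out.Location))
    }
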
@@ -225,6 +305,153 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
//
// Creates a copy of an object that is already stored in Amazon S3.
//
+// You can store individual objects of up to 5 TB in Amazon S3. You create a
+// copy of your object up to 5 GB in size in a single atomic operation using
+// this API. However, to copy an object greater than 5 GB, you must use the
+// multipart upload Upload Part - Copy API. For more information, see Copy Object
+// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
+//
+// All copy requests must be authenticated. Additionally, you must have read
+// access to the source object and write access to the destination bucket. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+// Both the Region that you want to copy the object from and the Region that
+// you want to copy the object to must be enabled for your account.
+//
+// A copy request might return an error when Amazon S3 receives the copy request
+// or while Amazon S3 is copying the files. If the error occurs before the copy
+// operation starts, you receive a standard Amazon S3 error. If the error occurs
+// during the copy operation, the error response is embedded in the 200 OK response.
+// This means that a 200 OK response can contain either a success or an error.
+// Design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// If the request is an HTTP 1.1 request, the response is chunk encoded. If
+// it were not, it would not contain the content-length, and you would need
+// to read the entire body.
+//
+// The copy request charge is based on the storage class and Region that you
+// specify for the destination object. For pricing information, see Amazon S3
+// pricing (https://aws.amazon.com/s3/pricing/).
+//
+// Amazon S3 transfer acceleration does not support cross-Region copies. If
+// you request a cross-Region copy using a transfer acceleration endpoint, you
+// get a 400 Bad Request error. For more information, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+//
+// Metadata
+//
+// When copying an object, you can preserve all metadata (default) or specify
+// new metadata. However, the ACL is not preserved and is set to private for
+// the user making the request. To override the default ACL setting, specify
+// a new ACL when generating a copy request. For more information, see Using
+// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+//
+// To specify whether you want the object metadata copied from the source object
+// or replaced with metadata provided in the request, you can optionally add
+// the x-amz-metadata-directive header. When you grant permissions, you can
+// use the s3:x-amz-metadata-directive condition key to enforce certain metadata
+// behavior when objects are uploaded. For more information, see Specifying
+// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
+// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific
+// condition keys, see Actions, Resources, and Condition Keys for Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
+//
+// x-amz-copy-source-if Headers
+//
+// To only copy an object under certain conditions, such as whether the Etag
+// matches or whether the object was modified before or after a specified date,
+// use the following request parameters:
+//
+// * x-amz-copy-source-if-match
+//
+// * x-amz-copy-source-if-none-match
+//
+// * x-amz-copy-source-if-unmodified-since
+//
+// * x-amz-copy-source-if-modified-since
+//
+// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// 200 OK and copies the data:
+//
+// * x-amz-copy-source-if-match condition evaluates to true
+//
+// * x-amz-copy-source-if-unmodified-since condition evaluates to false
+//
+// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// the 412 Precondition Failed response code:
+//
+// * x-amz-copy-source-if-none-match condition evaluates to false
+//
+// * x-amz-copy-source-if-modified-since condition evaluates to true
+//
+// All headers with the x-amz- prefix, including x-amz-copy-source, must be
+// signed.
+//
+// Encryption
+//
+// The source object that you are copying can be encrypted or unencrypted. The
+// source object can be encrypted with server-side encryption using AWS managed
+// encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption
+// key. With server-side encryption, Amazon S3 encrypts your data as it writes
+// it to disks in its data centers and decrypts the data when you access it.
+//
+// You can optionally use the appropriate encryption-related headers to request
+// server-side encryption for the target object. You have the option to provide
+// your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form
+// of server-side encryption that was used to encrypt the source object. You
+// can even request encryption if the source object was not encrypted. For more
+// information about server-side encryption, see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// When copying an object, you can optionally use headers to grant ACL-based
+// permissions. By default, all objects are private. Only the owner has full
+// access control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the ACL on the object. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+//
+// Storage Class Options
+//
+// You can use the CopyObject operation to change the storage class of an object
+// that is already stored in Amazon S3 using the StorageClass parameter. For
+// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+// in the Amazon S3 Service Developer Guide.
+//
+// Versioning
+//
+// By default, x-amz-copy-source identifies the current version of an object
+// to copy. If the current version is a delete marker, Amazon S3 behaves as
+// if the object was deleted. To copy a different version, use the versionId
+// subresource.
+//
+// If you enable versioning on the target bucket, Amazon S3 generates a unique
+// version ID for the object being copied. This version ID is different from
+// the version ID of the source object. Amazon S3 returns the version ID of
+// the copied object in the x-amz-version-id response header in the response.
+//
+// If you do not enable versioning or suspend it on the target bucket, the version
+// ID that Amazon S3 generates is always null.
+//
+// If the source object's storage class is GLACIER, you must restore a copy
+// of this object before you can use it as a source object for the copy operation.
+// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+//
+// The following operations are related to CopyObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -235,7 +462,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
// Returned Error Codes:
// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
// The source object of the COPY operation is not in the active tier and is
-// only stored in Amazon Glacier.
+// only stored in Amazon S3 Glacier.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
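
The CopyObject documentation above covers metadata handling, the x-amz-copy-source-if-* conditions, and the GLACIER restriction surfaced by ObjectNotInActiveTierError. The sketch below (placeholder bucket and key names) copies an object while replacing its metadata and checks for that error code:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // CopySource is "source-bucket/source-key" (URL-encoded); REPLACE applies
        // the metadata given here instead of copying the source object's metadata.
        out, err := svc.CopyObject(&s3.CopyObjectInput{
            Bucket:            aws.String("dest-bucket"),
            Key:               aws.String("copied-key"),
            CopySource:        aws.String("source-bucket/source-key"),
            MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
            Metadata:          map[string]*string{"owner": aws.String("team-a")},
        })
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeObjectNotInActiveTierError {
            // The source is archived in S3 Glacier; restore it before copying.
            log.Fatal("restore the source object first: ", aerr.Message())
        }
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("copied, new ETag:", aws.StringValue(out.CopyObjectResult.ETag))
    }
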
@@ -303,7 +530,67 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
// CreateBucket API operation for Amazon Simple Storage Service.
//
-// Creates a new bucket.
+// Creates a new bucket. To create a bucket, you must register with Amazon S3
+// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests
+// are never allowed to create buckets. By creating the bucket, you become the
+// bucket owner.
+//
+// Not every string is an acceptable bucket name. For information on bucket
+// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html).
+//
+// By default, the bucket is created in the US East (N. Virginia) Region. You
+// can optionally specify a Region in the request body. You might choose a Region
+// to optimize latency, minimize costs, or address regulatory requirements.
+// For example, if you reside in Europe, you will probably find it advantageous
+// to create buckets in the Europe (Ireland) Region. For more information, see
+// How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+//
+// If you send your create bucket request to the s3.amazonaws.com endpoint,
+// the request goes to the us-east-1 Region. Accordingly, the signature calculations
+// in Signature Version 4 must use us-east-1 as the Region, even if the location
+// constraint in the request specifies another Region where the bucket is to
+// be created. If you create a bucket in a Region other than US East (N. Virginia),
+// your application must be able to handle 307 redirect. For more information,
+// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html).
+//
+// When creating a bucket using this operation, you can optionally specify the
+// accounts or groups that should be granted specific permissions on the bucket.
+// There are two ways to grant the appropriate permissions using the request
+// headers.
+//
+// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. For more information, see
+// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. These headers map to the set of permissions Amazon S3 supports
+// in an ACL. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: id – if the value specified is the canonical user ID of an
+// AWS account uri – if you are granting permissions to a predefined group
+// emailAddress – if the value specified is the email address of an AWS
+// account Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants the AWS accounts identified by account IDs permissions to
+// read object data and its metadata: x-amz-grant-read: id="11112222333",
+// id="444455556666"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// The following operations are related to CreateBucket:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -318,6 +605,11 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
// by all users of the system. Please select a different name and try again.
//
// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+// The bucket you tried to create already exists, and you own it. Amazon S3
+// returns this error in all AWS Regions except in the North Virginia Region.
+// For legacy compatibility, if you re-create an existing bucket that you already
+// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+// bucket access control lists (ACLs).
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
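
The CreateBucket documentation above notes the us-east-1 default, the need for a location constraint elsewhere, canned ACLs, and the BucketAlreadyOwnedByYou behavior. A small sketch with a placeholder bucket name:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("eu-west-1"))))

        // Outside us-east-1 the Region must also appear as the LocationConstraint
        // in the request body, as described above.
        _, err := svc.CreateBucket(&s3.CreateBucketInput{
            Bucket: aws.String("example-bucket-name"),
            ACL:    aws.String(s3.BucketCannedACLPrivate),
            CreateBucketConfiguration: &s3.CreateBucketConfiguration{
                LocationConstraint: aws.String("eu-west-1"),
            },
        })
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case s3.ErrCodeBucketAlreadyOwnedByYou:
                fmt.Println("bucket already exists in this account; nothing to do")
                return
            case s3.ErrCodeBucketAlreadyExists:
                log.Fatal("name is taken by another account; pick a different one")
            }
        }
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("bucket created")
    }
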
@@ -385,13 +677,155 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
// CreateMultipartUpload API operation for Amazon Simple Storage Service.
//
-// Initiates a multipart upload and returns an upload ID.
+// This operation initiates a multipart upload and returns an upload ID. This
+// upload ID is used to associate all of the parts in the specific multipart
+// upload. You specify this upload ID in each of your subsequent upload part
+// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)).
+// You also include this upload ID in the final request to either complete or
+// abort the multipart upload request.
//
-// Note: After you initiate multipart upload and upload one or more parts, you
-// must either complete or abort multipart upload in order to stop getting charged
-// for storage of the uploaded parts. Only after you either complete or abort
-// multipart upload, Amazon S3 frees up the parts storage and stops charging
-// you for the parts storage.
+// For more information about multipart uploads, see Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html).
+//
+// If you have configured a lifecycle rule to abort incomplete multipart uploads,
+// the upload must complete within the number of days specified in the bucket
+// lifecycle configuration. Otherwise, the incomplete multipart upload becomes
+// eligible for an abort operation and Amazon S3 aborts the multipart upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+//
+// For information about the permissions required to use the multipart upload
+// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// For request signing, multipart upload is just a series of regular requests.
+// You initiate a multipart upload, send one or more requests to upload parts,
+// and then complete the multipart upload process. You sign each request individually.
+// There is nothing special about signing multipart upload requests. For more
+// information about signing, see Authenticating Requests (AWS Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or
+// abort the multipart upload. Amazon S3 frees up the space used to store the
+// parts and stop charging you for storing them only after you either complete
+// or abort a multipart upload.
+//
+// You can optionally request server-side encryption. For server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. You can provide your own encryption key,
+// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
+// Amazon S3-managed encryption keys. If you choose to provide your own encryption
+// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html)
+// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the request to initiate the upload
+// by using CreateMultipartUpload.
+//
+// To perform a multipart upload with encryption using an AWS KMS CMK, the requester
+// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*,
+// and kms:DescribeKey actions on the key. These permissions are required because
+// Amazon S3 must decrypt and read data from the encrypted file parts before
+// it completes the multipart upload.
+//
+// If your AWS Identity and Access Management (IAM) user or role is in the same
+// AWS account as the AWS KMS CMK, then you must have these permissions on the
+// key policy. If your IAM user or role belongs to a different account than
+// the key, then you must have the permissions on both the key policy and your
+// IAM user or role.
+//
+// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Permissions
+//
+// When copying an object, you can optionally specify the accounts or groups
+// that should be granted specific permissions on the new object. There are
+// two ways to grant the permissions using the request headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
+// map to the set of permissions that Amazon S3 supports in an ACL. For more
+// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Server-Side- Encryption-Specific Request Headers
+//
+// You can optionally tell Amazon S3 to encrypt data at rest using server-side
+// encryption. Server-side encryption is for data encryption at rest. Amazon
+// S3 encrypts your data as it writes it to disks in its data centers and decrypts
+// it when you access it. The option you use depends on whether you want to
+// use AWS managed encryption keys or provide your own encryption key.
+//
+// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs)
+// stored in AWS Key Management Service (AWS KMS) – If you want AWS to
+// manage the keys used to encrypt data, specify the following headers in
+// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id
+// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms,
+// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon
+// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and
+// PUT requests for an object protected by AWS KMS fail if you don't make
+// them with SSL or by using SigV4. For more information about server-side
+// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data
+// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// * Use customer-provided encryption keys – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key
+// x-amz-server-side-encryption-customer-key-MD5 For more information about
+// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting
+// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// Access-Control-List (ACL)-Specific Request Headers
+//
+// You also can use the following access control–related headers with this
+// operation. By default, all objects are private. Only the owner has full access
+// control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the access control list (ACL) on the object. For more information,
+// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// With this operation, you can grant access permissions using one of the following
+// two methods:
+//
+// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined
+// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
+// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly — To explicitly grant access
+// permissions to specific AWS accounts or groups, use the following headers.
+// Each header maps to specific permissions that Amazon S3 supports in an
+// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// In the header, you specify a list of grantees who get the specific permission.
+// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write
+// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: id – if the value specified is the canonical user ID of an
+// AWS account uri – if you are granting permissions to a predefined group
+// emailAddress – if the value specified is the email address of an AWS
+// account Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants the AWS accounts identified by account IDs permissions to
+// read object data and its metadata: x-amz-grant-read: id="11112222333",
+// id="444455556666"
+//
+// The following operations are related to CreateMultipartUpload:
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -466,8 +900,14 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
// DeleteBucket API operation for Amazon Simple Storage Service.
//
-// Deletes the bucket. All objects (including all object versions and Delete
-// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+// Deletes the bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -547,7 +987,20 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
//
// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
// action. The bucket owner has this permission by default. The bucket owner
-// can grant this permission to others.
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
+// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// The following operations are related to DeleteBucketAnalyticsConfiguration:
+//
+// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+//
+// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -622,7 +1075,20 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
// DeleteBucketCors API operation for Amazon Simple Storage Service.
//
-// Deletes the CORS configuration information set for the bucket.
+// Deletes the cors configuration information set for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:PutBucketCORS
+// action. The bucket owner has this permission by default and can grant this
+// permission to others.
+//
+// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources:
+//
+// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -697,7 +1163,23 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (
// DeleteBucketEncryption API operation for Amazon Simple Storage Service.
//
-// Deletes the server-side encryption configuration from the bucket.
+// This implementation of the DELETE operation removes default encryption from
+// the bucket. For information about the Amazon S3 default encryption feature,
+// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
+//
+// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -775,6 +1257,23 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
// Deletes an inventory configuration (identified by the inventory ID) from
// the bucket.
//
+// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
+//
+// Operations related to DeleteBucketInventoryConfiguration include:
+//
+// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+//
+// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -848,7 +1347,27 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
//
-// Deletes the lifecycle configuration from the bucket.
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3
+// removes all the lifecycle configuration rules in the lifecycle subresource
+// associated with the bucket. Your objects never expire, and Amazon S3 no longer
+// automatically deletes any objects on the basis of rules contained in the
+// deleted lifecycle configuration.
+//
+// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration
+// action. By default, the bucket owner has this permission and the bucket owner
+// can grant this permission to others.
+//
+// There is usually some time lag before lifecycle configuration deletion is
+// fully propagated to all the Amazon S3 systems.
+//
+// For more information about the object expiration, see Elements to Describe
+// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
+//
+// Related actions include:
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -923,8 +1442,28 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
-// Deletes a metrics configuration (specified by the metrics configuration ID)
-// from the bucket.
+// Deletes a metrics configuration for the Amazon CloudWatch request metrics
+// (specified by the metrics configuration ID) from the bucket. Note that this
+// doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to DeleteBucketMetricsConfiguration:
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -999,7 +1538,29 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
//
-// Deletes the policy from the bucket.
+// This implementation of the DELETE operation uses the policy subresource to
+// delete the policy of a specified bucket. If you are using an identity other
+// than the root user of the AWS account that owns the bucket, the calling identity
+// must have the DeleteBucketPolicy permissions on the specified bucket and
+// belong to the bucket owner's account to use this operation.
+//
+// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
+// Access Denied error. If you have the correct permissions, but you're not
+// using an identity that belongs to the bucket owner's account, Amazon S3 returns
+// a 405 Method Not Allowed error.
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operations are related to DeleteBucketPolicy
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1074,10 +1635,26 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
// DeleteBucketReplication API operation for Amazon Simple Storage Service.
//
-// Deletes the replication configuration from the bucket. For information about
-// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// Deletes the replication configuration from the bucket.
+//
+// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
+// action. The bucket owner has these permissions by default and can grant it
+// to others. For more information about permissions, see Permissions Related
+// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// It can take a while for the deletion of a replication configuration to fully
+// propagate.
+//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
// in the Amazon S3 Developer Guide.
//
+// The following operations are related to DeleteBucketReplication:
+//
+// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1153,6 +1730,16 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
//
// Deletes the tags from the bucket.
//
+// To use this operation, you must have permission to perform the s3:PutBucketTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others.
+//
+// The following operations are related to DeleteBucketTagging:
+//
+// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
+//
+// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1226,7 +1813,26 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
//
-// This operation removes the website configuration from the bucket.
+// This operation removes the website configuration for a bucket. Amazon S3
+// returns a 200 OK response upon successfully deleting a website configuration
+// on the specified bucket. You will get a 200 OK response if the website configuration
+// you are trying to delete does not exist on the bucket. Amazon S3 returns
+// a 404 response if the bucket specified in the request does not exist.
+//
+// This DELETE operation requires the S3:DeleteBucketWebsite permission. By
+// default, only the bucket owner can delete the website configuration attached
+// to a bucket. However, bucket owners can grant other users permission to delete
+// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
+// permission.
+//
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// The following operations are related to DeleteBucketWebsite:
+//
+// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html)
+//
+// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1304,6 +1910,30 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
// marker, which becomes the latest version of the object. If there isn't a
// null version, Amazon S3 does not remove any objects.
//
+// To remove a specific version, you must be the bucket owner and you must use
+// the version Id subresource. Using this subresource permanently deletes the
+// version. If the object deleted is a delete marker, Amazon S3 sets the response
+// header, x-amz-delete-marker, to true.
+//
+// If the object you want to delete is in a bucket where the bucket versioning
+// configuration is MFA Delete enabled, you must include the x-amz-mfa request
+// header in the DELETE versionId request. Requests that include x-amz-mfa must
+// use HTTPS.
+//
+// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
+// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+//
+// You can delete objects by explicitly calling the DELETE Object API or configure
+// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// to enable Amazon S3 to remove them for you. If you want to block users or
+// accounts from removing or deleting objects from your bucket, you must deny
+// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
+// actions.
+//
+// The following operation is related to DeleteObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1376,7 +2006,21 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
// DeleteObjectTagging API operation for Amazon Simple Storage Service.
//
-// Removes the tag-set from an existing object.
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteBucketMetricsConfiguration:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1445,13 +2089,57 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
output = &DeleteObjectsOutput{}
req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// DeleteObjects API operation for Amazon Simple Storage Service.
//
// This operation enables you to delete multiple objects from a bucket using
-// a single HTTP request. You may specify up to 1000 keys.
+// a single HTTP request. If you know the object keys that you want to delete,
+// then this operation provides a suitable alternative to sending individual
+// delete requests, reducing per-request overhead.
+//
+// The request contains a list of up to 1000 keys that you want to delete. In
+// the XML, you provide the object key names, and optionally, version IDs if
+// you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success, or failure, in the response. Note that if
+// the object specified in the request is not found, Amazon S3 returns the result
+// as deleted.
+//
+// The operation supports two modes for the response: verbose and quiet. By
+// default, the operation uses verbose mode in which the response includes the
+// result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion, the operation does not return any information about
+// the delete in the response body.
+//
+// When performing this operation on an MFA Delete enabled bucket that attempts
+// to delete any versioned objects, you must include an MFA token. If you do
+// not provide one, the entire request will fail, even if there are non-versioned
+// objects you are trying to delete. If you provide an invalid token, whether
+// there are versioned keys in the request or not, the entire Multi-Object Delete
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+//
+// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
+// Amazon S3 uses the header value to ensure that your request body has not
+// been altered in transit.
+//
+// The following operations are related to DeleteObjects:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
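+//
+// As an illustrative sketch only, a quiet-mode Multi-Object Delete might look
+// like the following (svc, the bucket, and the keys are placeholders; the
+// contentMd5Handler registered above supplies the required Content-MD5 header):
+//
+//    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{
+//                {Key: aws.String("photos/2006/February/sample.jpg")},
+//                {Key: aws.String("photos/2006/March/sample.jpg")},
+//            },
+//            Quiet: aws.Bool(true), // report only keys whose delete failed
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(out.Errors) // empty when every delete succeeded
+//    }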
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1526,7 +2214,21 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput)
// DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
//
-// Removes the PublicAccessBlock configuration from an Amazon S3 bucket.
+// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:PutBucketPublicAccessBlock permission.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// The following operations are related to DeletePublicAccessBlock:
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1600,7 +2302,33 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
-// Returns the accelerate configuration of a bucket.
+// This implementation of the GET operation uses the accelerate subresource
+// to return the Transfer Acceleration state of a bucket, which is either Enabled
+// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
+// enables you to perform faster data transfers to and from Amazon S3.
+//
+// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// You set the Transfer Acceleration state of an existing bucket to Enabled
+// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
+// operation.
+//
+// A GET accelerate request does not return a state value for a bucket that
+// has no transfer acceleration state. A bucket has no Transfer Acceleration
+// state if a state has never been set on the bucket.
+//
+// For more information about transfer acceleration, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
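+//
+// A minimal, illustrative sketch (svc and the bucket name are placeholders;
+// Status is unset when acceleration has never been configured):
+//
+//    out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Status)) // "Enabled", "Suspended", or ""
+//    }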
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1674,7 +2402,15 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
// GetBucketAcl API operation for Amazon Simple Storage Service.
//
-// Gets the access control policy for the bucket.
+// This implementation of the GET operation uses the acl subresource to return
+// the access control list (ACL) of a bucket. To use GET to return the ACL of
+// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission
+// is granted to the anonymous user, you can return the ACL of the bucket without
+// using an authorization header.
+//
+// Related Resources
+//
+// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1748,8 +2484,27 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
-// Gets an analytics configuration for the bucket (specified by the analytics
-// configuration ID).
+// This implementation of the GET operation returns an analytics configuration
+// (identified by the analytics configuration ID) from the bucket.
+//
+// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
+// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+//
+// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1823,7 +2578,20 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
// GetBucketCors API operation for Amazon Simple Storage Service.
//
-// Returns the CORS configuration for the bucket.
+// Returns the cors configuration information set for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:GetBucketCORS
+// action. By default, the bucket owner has this permission and can grant it
+// to others.
+//
+// For more information about cors, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html).
+//
+// The following operations are related to GetBucketCors:
+//
+// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1897,7 +2665,21 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r
// GetBucketEncryption API operation for Amazon Simple Storage Service.
//
-// Returns the server-side encryption configuration of a bucket.
+// Returns the default encryption configuration for an Amazon S3 bucket. For
+// information about the Amazon S3 default encryption feature, see Amazon S3
+// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html).
+//
+// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// The following operations are related to GetBucketEncryption:
+//
+// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
+//
+// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1971,8 +2753,25 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon
// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
-// Returns an inventory configuration (identified by the inventory ID) from
-// the bucket.
+// Returns an inventory configuration (identified by the inventory configuration
+// ID) from the bucket.
+//
+// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
+//
+// The following operations are related to GetBucketInventoryConfiguration:
+//
+// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+//
+// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2051,7 +2850,34 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
// GetBucketLifecycle API operation for Amazon Simple Storage Service.
//
-// No longer used, see the GetBucketLifecycleConfiguration operation.
+//
+// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html).
+// If you configured a bucket lifecycle using the filter element, you should
+// see the updated version of this topic. This topic is provided for backward
+// compatibility.
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// GetBucketLifecycle has the following special error:
+//
+// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+// Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycle:
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
+//
+// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2129,7 +2955,37 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
-// Returns the lifecycle configuration information set on the bucket.
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The response describes
+// the new filter element that you can use to specify a filter to select a subset
+// of objects to which the rule applies. If you are still using the previous version
+// of the lifecycle configuration, it still works. For the earlier API description,
+// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission, by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+// Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycleConfiguration:
+//
+// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
+//
+// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
+//
+// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2203,7 +3059,17 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
// GetBucketLocation API operation for Amazon Simple Storage Service.
//
-// Returns the region the bucket resides in.
+// Returns the Region the bucket resides in. You set the bucket's Region using
+// the LocationConstraint request parameter in a CreateBucket request. For more
+// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+//
+// To use this implementation of the operation, you must be the bucket owner.
+//
+// The following operations are related to GetBucketLocation:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
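+//
+// A minimal, illustrative sketch (svc and the bucket name are placeholders; an
+// empty LocationConstraint corresponds to us-east-1):
+//
+//    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.LocationConstraint))
+//    }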
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2280,6 +3146,12 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
// Returns the logging status of a bucket and the permissions users have to
// view and modify that status. To use GET, you must be the bucket owner.
//
+// The following operations are related to GetBucketLogging:
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2353,7 +3225,26 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu
// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
// Gets a metrics configuration (specified by the metrics configuration ID)
-// from the bucket.
+// from the bucket. Note that this doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to GetBucketMetricsConfiguration:
+//
+// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2432,7 +3323,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
// GetBucketNotification API operation for Amazon Simple Storage Service.
//
-// No longer used, see the GetBucketNotificationConfiguration operation.
+// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2512,6 +3403,22 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
//
// Returns the notification configuration of a bucket.
//
+// If notifications are not enabled on the bucket, the operation returns an
+// empty NotificationConfiguration element.
+//
+// By default, you must be the bucket owner to read the notification configuration
+// of a bucket. However, the bucket owner can use a bucket policy to grant permission
+// to other users to read this configuration with the s3:GetBucketNotification
+// permission.
+//
+// For more information about setting and reading the notification configuration
+// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operation is related to GetBucketNotification:
+//
+// * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2584,7 +3491,26 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
// GetBucketPolicy API operation for Amazon Simple Storage Service.
//
-// Returns the policy of a specified bucket.
+// Returns the policy of a specified bucket. If you are using an identity other
+// than the root user of the AWS account that owns the bucket, the calling identity
+// must have the GetBucketPolicy permissions on the specified bucket and belong
+// to the bucket owner's account in order to use this operation.
+//
+// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a
+// 405 Method Not Allowed error.
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operation is related to GetBucketPolicy:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
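+//
+// A minimal, illustrative sketch (svc and the bucket name are placeholders; the
+// policy is returned as a JSON document in a string):
+//
+//    out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Policy))
+//    }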
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2659,7 +3585,22 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re
// GetBucketPolicyStatus API operation for Amazon Simple Storage Service.
//
// Retrieves the policy status for an Amazon S3 bucket, indicating whether the
-// bucket is public.
+// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus
+// permission. For more information about Amazon S3 permissions, see Specifying
+// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+//
+// For more information about when Amazon S3 considers a bucket public, see
+// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+//
+// The following operations are related to GetBucketPolicyStatus:
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2739,6 +3680,26 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
// to all Amazon S3 systems. Therefore, a get request soon after put or delete
// can return a wrong result.
//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// This operation requires permissions for the s3:GetReplicationConfiguration
+// action. For more information about permissions, see Using Bucket Policies
+// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// If you include the Filter element in a replication configuration, you must
+// also include the DeleteMarkerReplication and Priority elements. The response
+// also returns those elements.
+//
+// For information about GetBucketReplication errors, see List of replication-related
+// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList)
+//
+// The following operations are related to GetBucketReplication:
+//
+// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2811,7 +3772,13 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput)
// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
//
-// Returns the request payment configuration of a bucket.
+// Returns the request payment configuration of a bucket. To use this version
+// of the operation, you must be the bucket owner. For more information, see
+// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
+//
+// The following operations are related to GetBucketRequestPayment:
+//
+// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2887,6 +3854,21 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
//
// Returns the tag set associated with the bucket.
//
+// To use this operation, you must have permission to perform the s3:GetBucketTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others.
+//
+// GetBucketTagging has the following special error:
+//
+// * Error code: NoSuchTagSetError Description: There is no tag set associated
+// with the bucket.
+//
+// The following operations are related to GetBucketTagging:
+//
+// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
+//
+// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2961,6 +3943,20 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
//
// Returns the versioning state of a bucket.
//
+// To retrieve the versioning state of a bucket, you must be the bucket owner.
+//
+// This implementation also returns the MFA Delete status of the versioning
+// state. If the MFA Delete status is enabled, the bucket owner must use an
+// authentication device to change the versioning state of the bucket.
+//
+// The following operations are related to GetBucketVersioning:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
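+// A minimal, illustrative sketch (svc and the bucket name are placeholders):
+//
+//    out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.MFADelete))
+//    }
+//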
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3033,7 +4029,21 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
// GetBucketWebsite API operation for Amazon Simple Storage Service.
//
-// Returns the website configuration for a bucket.
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET operation requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3107,7 +4117,132 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
// GetObject API operation for Amazon Simple Storage Service.
//
-// Retrieves objects from Amazon S3.
+// Retrieves objects from Amazon S3. To use GET, you must have READ access to
+// the object. If you grant READ access to the anonymous user, you can return
+// the object without using an authorization header.
+//
+// An Amazon S3 bucket has no directory hierarchy such as you would find in
+// a typical computer file system. You can, however, create a logical hierarchy
+// by using object key names that imply a folder structure. For example, instead
+// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
+//
+// To get an object from such a logical hierarchy, specify the full key name
+// for the object in the GET operation. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg, specify the resource
+// as /photos/2006/February/sample.jpg. For a path-style request example, if
+// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket,
+// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For
+// more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+//
+// To distribute large files to many people, you can save bandwidth costs by
+// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
+// For more information about returning the ACL of an object, see GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
+//
+// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE
+// storage classes, before you can retrieve the object you must first restore
+// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this operation returns an InvalidObjectStateError error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you GET the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
+// action), the response also returns the x-amz-tagging-count header that provides
+// the number of tags associated with the object. You can use GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+// to retrieve the tag set associated with an object.
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will
+// return an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 will return
+// an HTTP status code 403 ("access denied") error.
+//
+// Versioning
+//
+// By default, the GET operation returns the current version of an object. To
+// return a different version, use the versionId subresource.
+//
+// If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in the
+// response.
+//
+// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
+//
+// Overriding Response Header Values
+//
+// There are times when you want to override certain response header values
+// in a GET response. For example, you might override the Content-Disposition
+// response header value in your GET request.
+//
+// You can override values for a set of response headers using the following
+// query parameters. These response header values are sent only on a successful
+// request, that is, when status code 200 OK is returned. The set of headers
+// you can override using these parameters is a subset of the headers that Amazon
+// S3 accepts when you create an object. The response headers that you can override
+// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
+// Content-Disposition, and Content-Encoding. To override these header values
+// in the GET response, you use the following request parameters.
+//
+// You must sign the request, either using an Authorization header or a presigned
+// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
+// request.
+//
+// * response-content-type
+//
+// * response-content-language
+//
+// * response-expires
+//
+// * response-cache-control
+//
+// * response-content-disposition
+//
+// * response-content-encoding
+//
+// Additional Considerations about Request Headers
+//
+// If both the If-Match and If-Unmodified-Since headers are present in the
+// request, the If-Match condition evaluates to true, and the If-Unmodified-Since
+// condition evaluates to false, then Amazon S3 returns 200 OK and the requested
+// data.
+//
+// If both the If-None-Match and If-Modified-Since headers are present in the
+// request, the If-None-Match condition evaluates to false, and the If-Modified-Since
+// condition evaluates to true, then Amazon S3 returns a 304 Not Modified response
+// code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// The following operations are related to GetObject:
+//
+// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
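+//
+// For illustration only, a sketch of a signed GET that overrides response headers
+// (svc, the bucket, and the key are placeholders, not part of this change):
+//
+//    out, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket:                     aws.String("example-bucket"),
+//        Key:                        aws.String("photos/2006/February/sample.jpg"),
+//        ResponseContentType:        aws.String("application/octet-stream"),
+//        ResponseContentDisposition: aws.String("attachment; filename=\"sample.jpg\""),
+//    })
+//    if err == nil {
+//        defer out.Body.Close() // out.Body streams the object data
+//    }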
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3186,7 +4321,21 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
// GetObjectAcl API operation for Amazon Simple Storage Service.
//
-// Returns the access control list (ACL) of an object.
+// Returns the access control list (ACL) of an object. To use this operation,
+// you must have READ_ACP access to the object.
+//
+// Versioning
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId subresource.
+//
+// The following operations are related to GetObjectAcl:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3265,7 +4414,8 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req
// GetObjectLegalHold API operation for Amazon Simple Storage Service.
//
-// Gets an object's current Legal Hold status.
+// Gets an object's current Legal Hold status. For more information, see Locking
+// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3339,9 +4489,10 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration
// GetObjectLockConfiguration API operation for Amazon Simple Storage Service.
//
-// Gets the object lock configuration for a bucket. The rule specified in the
-// object lock configuration will be applied by default to every new object
-// placed in the specified bucket.
+// Gets the Object Lock configuration for a bucket. The rule specified in the
+// Object Lock configuration will be applied by default to every new object
+// placed in the specified bucket. For more information, see Locking Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3415,7 +4566,8 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req
// GetObjectRetention API operation for Amazon Simple Storage Service.
//
-// Retrieves an object's retention settings.
+// Retrieves an object's retention settings. For more information, see Locking
+// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3489,7 +4641,25 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request
// GetObjectTagging API operation for Amazon Simple Storage Service.
//
-// Returns the tag-set of an object.
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the s3:GetObjectTagging
+// action. By default, the GET operation returns information about the current
+// version of an object. For a versioned bucket, you can have multiple versions of an
+// object in your bucket. To retrieve tags of any other version, use the versionId
+// query parameter. You also need permission for the s3:GetObjectVersionTagging
+// action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// The following operation is related to GetObjectTagging:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
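+//
+// A minimal, illustrative sketch of reading the tag set of a specific object
+// version (svc, bucket, key, and version ID are placeholders):
+//
+//    out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+//        Bucket:    aws.String("example-bucket"),
+//        Key:       aws.String("photos/2006/February/sample.jpg"),
+//        VersionId: aws.String("example-version-id"), // requires s3:GetObjectVersionTagging
+//    })
+//    if err == nil {
+//        for _, t := range out.TagSet {
+//            fmt.Println(aws.StringValue(t.Key), aws.StringValue(t.Value))
+//        }
+//    }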
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3563,7 +4733,19 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
// GetObjectTorrent API operation for Amazon Simple Storage Service.
//
-// Return torrent files from a bucket.
+// Return torrent files from a bucket. BitTorrent can save you bandwidth when
+// you're distributing large files. For more information about BitTorrent, see
+// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
+//
+// You can get torrent only for objects that are less than 5 GB in size and
+// that are not encrypted using server-side encryption with a customer-provided
+// encryption key.
+//
+// To use GET, you must have READ access to the object.
+//
+// The following operation is related to GetObjectTorrent:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3637,7 +4819,30 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req
// GetPublicAccessBlock API operation for Amazon Simple Storage Service.
//
-// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket.
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To
+// use this operation, you must have the s3:GetBucketPublicAccessBlock permission.
+// For more information about Amazon S3 permissions, see Specifying Permissions
+// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket
+// or an object, it checks the PublicAccessBlock configuration for both the
+// bucket (or the bucket that contains the object) and the bucket owner's account.
+// If the PublicAccessBlock settings are different between the bucket and the
+// account, Amazon S3 uses the most restrictive combination of the bucket-level
+// and account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+//
+// The following operations are related to GetPublicAccessBlock:
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
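+//
+// A minimal, illustrative sketch (svc and the bucket name are placeholders):
+//
+//    out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        cfg := out.PublicAccessBlockConfiguration
+//        fmt.Println(aws.BoolValue(cfg.BlockPublicAcls), aws.BoolValue(cfg.BlockPublicPolicy))
+//    }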
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3713,7 +4918,15 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
// HeadBucket API operation for Amazon Simple Storage Service.
//
// This operation is useful to determine if a bucket exists and you have permission
-// to access it.
+// to access it. The operation returns a 200 OK if the bucket exists and you
+// have permission to access it. Otherwise, the operation might return responses
+// such as 404 Not Found and 403 Forbidden.
+//
+// To use this operation, you must have permissions to perform the s3:ListBucket
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
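+//
+// For illustration only, a sketch of an existence and permission check (svc and
+// the bucket name are placeholders):
+//
+//    _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("example-bucket")})
+//    if reqErr, ok := err.(awserr.RequestFailure); ok {
+//        fmt.Println(reqErr.StatusCode()) // 404 Not Found or 403 Forbidden, as noted above
+//    }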
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3796,6 +5009,63 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
// object itself. This operation is useful if you're only interested in an object's
// metadata. To use HEAD, you must have READ access to the object.
//
+// A HEAD request has the same options as a GET operation on an object. The
+// response is identical to the GET response except that there is no response
+// body.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you retrieve the metadata from the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// Request headers are limited to 8 KB in size. For more information, see Common
+// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+//
+// Consider the following when using request headers:
+//
+// * Consideration 1 – If both the If-Match and If-Unmodified-Since headers
+// are present in the request, the If-Match condition evaluates to true, and
+// the If-Unmodified-Since condition evaluates to false, then Amazon S3 returns
+// 200 OK and the requested data.
+//
+// * Consideration 2 – If both the If-None-Match and If-Modified-Since headers
+// are present in the request, the If-None-Match condition evaluates to false,
+// and the If-Modified-Since condition evaluates to true, then Amazon S3 returns
+// the 304 Not Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
+// an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 returns
+// an HTTP status code 403 ("access denied") error.
+//
+// The following operation is related to HeadObject:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
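+// For illustration only, a sketch of retrieving object metadata without the body
+// (svc, the bucket, and the key are placeholders):
+//
+//    out, err := svc.HeadObject(&s3.HeadObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ContentType))
+//    }
+//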
// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
// for more information on returned errors.
//
@@ -3871,7 +5141,33 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics
// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
//
-// Lists the analytics configurations for the bucket.
+// Lists the analytics configurations for the bucket. You can have up to 1,000
+// analytics configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. You should always check the IsTruncated element
+// in the response. If there are no more configurations to list, IsTruncated
+// is set to false. If there are more configurations to list, IsTruncated is
+// set to true, and there will be a value in NextContinuationToken. You use
+// the NextContinuationToken value to continue the pagination of the list by
+// passing the value in continuation-token in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
+// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// The following operations are related to ListBucketAnalyticsConfigurations:
+//
+// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
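+//
+// For illustration only, a sketch of paging through all configurations using
+// NextContinuationToken (svc and the bucket name are placeholders):
+//
+//    var token *string
+//    for {
+//        out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
+//            Bucket:            aws.String("example-bucket"),
+//            ContinuationToken: token,
+//        })
+//        if err != nil {
+//            break
+//        }
+//        // inspect out.AnalyticsConfigurationList here
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        token = out.NextContinuationToken
+//    }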
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3945,7 +5241,33 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory
// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
//
-// Returns a list of inventory configurations for the bucket.
+// Returns a list of inventory configurations for the bucket. You can have up
+// to 1,000 inventory configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false.
+// If there are more configurations to list, IsTruncated is set to true, and
+// there is a value in NextContinuationToken. You use the NextContinuationToken
+// value to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
+//
+// The following operations are related to ListBucketInventoryConfigurations:
+//
+// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4019,7 +5341,34 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf
// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
//
-// Lists the metrics configurations for the bucket.
+// Lists the metrics configurations for the bucket. The metrics configurations
+// are only for the request metrics of the bucket and do not provide information
+// on daily storage metrics. You can have up to 1,000 configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false.
+// If there are more configurations to list, IsTruncated is set to true, and
+// there is a value in NextContinuationToken. You use the NextContinuationToken
+// value to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For more information about metrics configurations and CloudWatch request
+// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to ListBucketMetricsConfigurations:
+//
+// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4173,7 +5522,40 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req
// ListMultipartUploads API operation for Amazon Simple Storage Service.
//
-// This operation lists in-progress multipart uploads.
+// This operation lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted.
+//
+// This operation returns at most 1,000 multipart uploads in the response; 1,000
+// is both the default and the maximum number of uploads a response can include.
+// You can further limit the number of uploads in a response by specifying the
+// max-uploads parameter in the request. If
+// additional multipart uploads satisfy the list criteria, the response will
+// contain an IsTruncated element with the value true. To list the additional
+// multipart uploads, use the key-marker and upload-id-marker request parameters.
+//
+// In the response, the uploads are sorted by key. If your application has initiated
+// more than one multipart upload using the same object key, then uploads in
+// the response are first sorted by key. Additionally, uploads are sorted in
+// ascending order within each key by the upload initiation time.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListMultipartUploads:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
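+//
+// A brief sketch of walking the in-progress uploads with the ListMultipartUploadsPages
+// helper of this SDK (the function and bucket names are placeholders; aws, fmt,
+// and s3 imports assumed):
+//
+//    func listUploads(svc *s3.S3, bucket string) error {
+//        return svc.ListMultipartUploadsPages(
+//            &s3.ListMultipartUploadsInput{Bucket: aws.String(bucket)},
+//            func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//                for _, u := range page.Uploads {
+//                    fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//                }
+//                // Returning true keeps paging; key-marker and upload-id-marker
+//                // are supplied by the paginator.
+//                return true
+//            })
+//    }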
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4246,10 +5628,12 @@ func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMu
},
}
- cont := true
- for p.Next() && cont {
- cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
+ for p.Next() {
+ if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) {
+ break
+ }
}
+
return p.Err()
}
@@ -4303,7 +5687,24 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
// ListObjectVersions API operation for Amazon Simple Storage Service.
//
-// Returns metadata about all of the versions of objects in a bucket.
+// Returns metadata about all of the versions of objects in a bucket. You can
+// also use request parameters as selection criteria to return metadata about
+// a subset of all the object versions.
+//
+// A 200 OK response can contain valid or invalid XML. Make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// The following operations are related to ListObjectVersions:
+//
+// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4376,10 +5777,12 @@ func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObje
},
}
- cont := true
- for p.Next() && cont {
- cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
+ for p.Next() {
+ if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) {
+ break
+ }
}
+
return p.Err()
}
@@ -4433,9 +5836,28 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request,
// ListObjects API operation for Amazon Simple Storage Service.
//
-// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
// the request parameters as selection criteria to return a subset of the objects
-// in a bucket.
+// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure
+// to design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// This API has been revised. We recommend that you use the newer version, ListObjectsV2
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html),
+// when developing applications. For backward compatibility, Amazon S3 continues
+// to support ListObjects.
+//
+// The following operations are related to ListObjects:
+//
+// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4513,10 +5935,12 @@ func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInpu
},
}
- cont := true
- for p.Next() && cont {
- cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
+ for p.Next() {
+ if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) {
+ break
+ }
}
+
return p.Err()
}
@@ -4570,10 +5994,35 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
// ListObjectsV2 API operation for Amazon Simple Storage Service.
//
-// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
// the request parameters as selection criteria to return a subset of the objects
-// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend
-// you use this revised API for new application development.
+// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure
+// to design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// To use this operation in an AWS Identity and Access Management (IAM) policy,
+// you must have permissions to perform the s3:ListBucket action. The bucket
+// owner has this permission by default and can grant this permission to others.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// This section describes the latest revision of the API. We recommend that
+// you use this revised API for application development. For backward compatibility,
+// Amazon S3 continues to support the prior version of this API, ListObjects
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html).
+//
+// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html).
+//
+// The following operations are related to ListObjectsV2:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
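+//
+// A short sketch of paging through a bucket with ListObjectsV2Pages (the function
+// name and parameter values are placeholders; aws, fmt, and s3 imports assumed):
+//
+//    func listObjects(svc *s3.S3, bucket, prefix string) error {
+//        return svc.ListObjectsV2Pages(
+//            &s3.ListObjectsV2Input{
+//                Bucket: aws.String(bucket),
+//                Prefix: aws.String(prefix),
+//            },
+//            func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//                for _, obj := range page.Contents {
+//                    fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
+//                }
+//                return true // continue until IsTruncated is false
+//            })
+//    }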
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4651,10 +6100,12 @@ func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2
},
}
- cont := true
- for p.Next() && cont {
- cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+ for p.Next() {
+ if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) {
+ break
+ }
}
+
return p.Err()
}
@@ -4709,6 +6160,33 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
// ListParts API operation for Amazon Simple Storage Service.
//
// Lists the parts that have been uploaded for a specific multipart upload.
+// This operation must include the upload ID, which you obtain by sending the
+// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)).
+// This request returns a maximum of 1,000 uploaded parts. The default number
+// of parts returned is 1,000 parts. You can restrict the number of parts returned
+// by specifying the max-parts request parameter. If your multipart upload consists
+// of more than 1,000 parts, the response returns an IsTruncated field with
+// the value of true, and a NextPartNumberMarker element. In subsequent ListParts
+// requests you can include the part-number-marker query string parameter and
+// set its value to the NextPartNumberMarker field value from the previous response.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListParts:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4781,10 +6259,12 @@ func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, f
},
}
- cont := true
- for p.Next() && cont {
- cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+ for p.Next() {
+ if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) {
+ break
+ }
}
+
return p.Err()
}
@@ -4833,7 +6313,41 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
-// Sets the accelerate configuration of an existing bucket.
+// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
+// Acceleration is a bucket-level feature that enables you to perform faster
+// data transfers to Amazon S3.
+//
+// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// The Transfer Acceleration state of a bucket can be set to one of the following
+// two values:
+//
+// * Enabled – Enables accelerated data transfers to the bucket.
+//
+// * Suspended – Disables accelerated data transfers to the bucket.
+//
+// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html)
+// operation returns the transfer acceleration state of a bucket.
+//
+// After setting the Transfer Acceleration state of a bucket to Enabled, it
+// might take up to thirty minutes before the data transfer rates to the bucket
+// increase.
+//
+// The name of the bucket used for Transfer Acceleration must be DNS-compliant
+// and must not contain periods (".").
+//
+// For more information about transfer acceleration, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+//
+// The following operations are related to PutBucketAccelerateConfiguration:
+//
+// * GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
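+//
+// A minimal sketch of enabling Transfer Acceleration on a bucket (the function
+// and bucket names are placeholders; aws and s3 imports assumed):
+//
+//    func enableAcceleration(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//            Bucket: aws.String(bucket),
+//            AccelerateConfiguration: &s3.AccelerateConfiguration{
+//                // Use s3.BucketAccelerateStatusSuspended to disable it again.
+//                Status: aws.String(s3.BucketAccelerateStatusEnabled),
+//            },
+//        })
+//        return err
+//    }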
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4903,12 +6417,101 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
output = &PutBucketAclOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketAcl API operation for Amazon Simple Storage Service.
//
-// Sets the permissions on a bucket using access control lists (ACL).
+// Sets the permissions on an existing bucket using access control lists (ACL).
+// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// To set the ACL of a bucket, you must have WRITE_ACP permission.
+//
+// You can use one of the following two ways to set a bucket's permissions:
+//
+// * Specify the ACL in the request body
+//
+// * Specify permissions using request headers
+//
+// You cannot specify access permission using both the body and the request
+// headers.
+//
+// Depending on your application needs, you may choose to set the ACL on a bucket
+// using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, then
+// you can continue to use that approach.
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (AWS
+// accounts or Amazon S3 groups) who will receive the permission. If you
+// use these ACL-specific headers, you cannot use the x-amz-acl header to
+// set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-write
+// header grants create, overwrite, and delete objects permission to the LogDelivery
+// group predefined by Amazon S3 and to two AWS accounts identified by their
+// account IDs. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: a Grantee element of xsi:type="CanonicalUser" that
+// contains the grantee's canonical user ID in an ID element. A DisplayName
+// element is optional and ignored in the request.
+//
+// * By URI: a Grantee element of xsi:type="Group" whose URI element names a
+// predefined group, for example http://acs.amazonaws.com/groups/global/AuthenticatedUsers.
+//
+// * By Email address: a Grantee element of xsi:type="AmazonCustomerByEmail"
+// whose EmailAddress element holds the email address of an AWS account.
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
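+//
+// A minimal sketch of the canned-ACL path described above; the x-amz-acl header
+// corresponds to the ACL field of the input (the function and bucket names are
+// placeholders; aws and s3 imports assumed):
+//
+//    func resetBucketACL(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
+//            Bucket: aws.String(bucket),
+//            // A canned ACL cannot be combined with the Grant* fields.
+//            ACL: aws.String(s3.BucketCannedACLPrivate),
+//        })
+//        return err
+//    }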
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4984,7 +6587,50 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
// Sets an analytics configuration for the bucket (specified by the analytics
-// configuration ID).
+// configuration ID). You can have up to 1,000 analytics configurations per
+// bucket.
+//
+// You can choose to have storage class analysis export analysis reports sent
+// to a comma-separated values (CSV) flat file. See the DataExport request element.
+// Reports are updated daily and are based on the object filters that you configure.
+// When selecting data export, you specify a destination bucket and an optional
+// destination prefix where the file is written. You can export the data to
+// a destination bucket in a different account. However, the destination bucket
+// must be in the same Region as the bucket that you are making the PUT analytics
+// configuration to. For more information, see Amazon S3 Analytics – Storage
+// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// You must create a bucket policy on the destination bucket where the exported
+// file is written to grant permissions to Amazon S3 to write objects to the
+// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory
+// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
+//
+// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// Special Errors
+//
+// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid
+// argument.
+//
+// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause:
+// You are attempting to create a new configuration but have already reached
+// the 1,000-configuration limit.
+//
+// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not
+// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration
+// bucket permission to set the configuration on the bucket.
+//
+// Related Resources
+//
+// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5054,12 +6700,58 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
output = &PutBucketCorsOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketCors API operation for Amazon Simple Storage Service.
//
-// Sets the CORS configuration for a bucket.
+// Sets the cors configuration for your bucket. If the configuration exists,
+// Amazon S3 replaces it.
+//
+// To use this operation, you must be allowed to perform the s3:PutBucketCORS
+// action. By default, the bucket owner has this permission and can grant it
+// to others.
+//
+// You set this configuration on a bucket so that the bucket can service cross-origin
+// requests. For example, you might want to enable a request whose origin is
+// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com
+// by using the browser's XMLHttpRequest capability.
+//
+// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
+// subresource to the bucket. The cors subresource is an XML document in which
+// you configure rules that identify origins and the HTTP methods that can be
+// executed on your bucket. The document is limited to 64 KB in size.
+//
+// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request)
+// against a bucket, it evaluates the cors configuration on the bucket and uses
+// the first CORSRule rule that matches the incoming browser request to enable
+// a cross-origin request. For a rule to match, the following conditions must
+// be met:
+//
+// * The request's Origin header must match AllowedOrigin elements.
+//
+// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
+// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod
+// elements.
+//
+// * Every header specified in the Access-Control-Request-Headers request
+// header of a pre-flight request must match an AllowedHeader element.
+//
+// For more information about CORS, go to Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
+//
+// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+//
+// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
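+//
+// A short sketch of putting a single CORS rule of the kind described above
+// (origin, bucket, and function names are placeholders; aws and s3 imports
+// assumed):
+//
+//    func putCors(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//            Bucket: aws.String(bucket),
+//            CORSConfiguration: &s3.CORSConfiguration{
+//                CORSRules: []*s3.CORSRule{{
+//                    AllowedOrigins: []*string{aws.String("http://www.example.com")},
+//                    AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
+//                    AllowedHeaders: []*string{aws.String("*")},
+//                    MaxAgeSeconds:  aws.Int64(3000),
+//                }},
+//            },
+//        })
+//        return err
+//    }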
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5129,13 +6821,38 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r
output = &PutBucketEncryptionOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketEncryption API operation for Amazon Simple Storage Service.
//
-// Creates a new server-side encryption configuration (or replaces an existing
-// one, if present).
+// This implementation of the PUT operation uses the encryption subresource
+// to set the default encryption state of an existing bucket.
+//
+// It sets default encryption for a bucket using server-side encryption with
+// Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (CMKs)
+// (SSE-KMS). For information about the Amazon S3
+// default encryption feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html).
+//
+// This operation requires AWS Signature Version 4. For more information, see
+// Authenticating Requests (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+//
+// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
+//
+// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
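+//
+// A minimal sketch of setting SSE-S3 as the default encryption; for SSE-KMS you
+// would instead set SSEAlgorithm to aws:kms and supply KMSMasterKeyID (the
+// function and bucket names are placeholders; aws and s3 imports assumed):
+//
+//    func setDefaultEncryption(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//            Bucket: aws.String(bucket),
+//            ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//                Rules: []*s3.ServerSideEncryptionRule{{
+//                    ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//                        SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
+//                    },
+//                }},
+//            },
+//        })
+//        return err
+//    }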
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5210,8 +6927,54 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
-// Adds an inventory configuration (identified by the inventory ID) from the
-// bucket.
+// This implementation of the PUT operation adds an inventory configuration
+// (identified by the inventory ID) to the bucket. You can have up to 1,000
+// inventory configurations per bucket.
+//
+// Amazon S3 inventory generates inventories of the objects in the bucket on
+// a daily or weekly basis, and the results are published to a flat file. The
+// bucket that is inventoried is called the source bucket, and the bucket where
+// the inventory flat file is stored is called the destination bucket. The destination
+// bucket must be in the same AWS Region as the source bucket.
+//
+// When you configure an inventory for a source bucket, you specify the destination
+// bucket where you want the inventory to be stored, and whether to generate
+// the inventory daily or weekly. You can also configure what object metadata
+// to include and whether to inventory all object versions or only current versions.
+// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// You must create a bucket policy on the destination bucket to grant permissions
+// to Amazon S3 to write objects to the bucket in the defined location. For
+// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage
+// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
+//
+// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Special Errors
+//
+// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument
+//
+// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are
+// attempting to create a new configuration but have already reached the
+// 1,000-configuration limit.
+//
+// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner
+// of the specified bucket, or you do not have the s3:PutInventoryConfiguration
+// bucket permission to set the configuration on the bucket.
+//
+// Related Resources
+//
+// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
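+//
+// A sketch of a daily CSV inventory configuration of the kind described above
+// (IDs, bucket names, and the destination ARN are placeholders; aws and s3
+// imports assumed):
+//
+//    func putInventory(svc *s3.S3, srcBucket string) error {
+//        _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//            Bucket: aws.String(srcBucket),
+//            Id:     aws.String("daily-inventory"),
+//            InventoryConfiguration: &s3.InventoryConfiguration{
+//                Id:                     aws.String("daily-inventory"),
+//                IsEnabled:              aws.Bool(true),
+//                IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//                Schedule: &s3.InventorySchedule{
+//                    Frequency: aws.String(s3.InventoryFrequencyDaily),
+//                },
+//                Destination: &s3.InventoryDestination{
+//                    S3BucketDestination: &s3.InventoryS3BucketDestination{
+//                        // The destination bucket needs a policy allowing Amazon S3 to write.
+//                        Bucket: aws.String("arn:aws:s3:::destination-bucket"),
+//                        Format: aws.String(s3.InventoryFormatCsv),
+//                    },
+//                },
+//            },
+//        })
+//        return err
+//    }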
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5286,12 +7049,64 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
output = &PutBucketLifecycleOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketLifecycle API operation for Amazon Simple Storage Service.
//
-// No longer used, see the PutBucketLifecycleConfiguration operation.
+//
+// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html).
+// This version has been deprecated. Existing lifecycle configurations will
+// work. For new lifecycle configurations, use the updated API.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// By default, all Amazon S3 resources, including buckets, objects, and related
+// subresources (for example, lifecycle configuration and website configuration)
+// are private. Only the resource owner, the AWS account that created the resource,
+// can access it. The resource owner can optionally grant access permissions
+// to others by writing an access policy. For this operation, users must get
+// the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. Explicit denial also supersedes
+// any other permissions. If you want to prevent users or accounts from removing
+// or deleting objects from your bucket, you must deny them permissions for
+// the following actions:
+//
+// * s3:DeleteObject
+//
+// * s3:DeleteObjectVersion
+//
+// * s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see Managing Access Permissions to
+// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// For more examples of transitioning objects to storage classes such as STANDARD_IA
+// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
+//
+// Related Resources
+//
+// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) (Deprecated)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+//
+// * By default, a resource owner (in this case, the bucket owner, which is
+// the AWS account that created the bucket) can perform any of these operations
+// and can grant others permission to perform them. For more information, see
+// the following topics in the Amazon Simple Storage Service Developer Guide:
+// Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5365,24 +7180,89 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
output = &PutBucketLifecycleConfigurationOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
-// Sets lifecycle configuration for your bucket. If a lifecycle configuration
-// exists, it replaces it.
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The previous version
+// of the API supported filtering based only on an object key name prefix, which
+// is supported for backward compatibility. For the related API description,
+// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketLifecycleConfiguration for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
-func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
- req, out := c.PutBucketLifecycleConfigurationRequest(input)
- return out, req.Send()
+// Rules
+//
+// You specify the lifecycle configuration in your request body. The lifecycle
+// configuration is specified as XML consisting of one or more rules. Each rule
+// consists of the following:
+//
+// * Filter identifying a subset of objects to which the rule applies. The
+// filter can be based on a key name prefix, object tags, or a combination
+// of both.
+//
+// * Status, indicating whether the rule is in effect.
+//
+// * One or more lifecycle transition and expiration actions that you want
+// Amazon S3 to perform on the objects identified by the filter. If the state
+// of your bucket is versioning-enabled or versioning-suspended, you can
+// have many versions of the same object (one current version and zero or
+// more noncurrent versions). Amazon S3 provides predefined actions that
+// you can specify for current and noncurrent object versions.
+//
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
+//
+// Permissions
+//
+// By default, all Amazon S3 resources are private, including buckets, objects,
+// and related subresources (for example, lifecycle configuration and website
+// configuration). Only the resource owner (that is, the AWS account that created
+// it) can access the resource. The resource owner can optionally grant access
+// permissions to others by writing an access policy. For this operation, a
+// user must get the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. Explicit deny also supersedes any
+// other permissions. If you want to block users or accounts from removing or
+// deleting objects from your bucket, you must deny them permissions for the
+// following actions:
+//
+// * s3:DeleteObject
+//
+// * s3:DeleteObjectVersion
+//
+// * s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see Managing Access Permissions to
+// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// The following are related to PutBucketLifecycleConfiguration:
+//
+// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
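+//
+// A sketch of a single rule with a prefix filter, a transition, and an expiration
+// action, mirroring the rule structure described above (IDs and values are
+// placeholders; aws and s3 imports assumed):
+//
+//    func putLifecycle(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//            Bucket: aws.String(bucket),
+//            LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//                Rules: []*s3.LifecycleRule{{
+//                    ID:     aws.String("expire-logs"),
+//                    Status: aws.String(s3.ExpirationStatusEnabled),
+//                    Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//                    Transitions: []*s3.Transition{{
+//                        Days:         aws.Int64(30),
+//                        StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
+//                    }},
+//                    Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
+//                }},
+//            },
+//        })
+//        return err
+//    }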
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycleConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
}
// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
@@ -5441,15 +7321,63 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
output = &PutBucketLoggingOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketLogging API operation for Amazon Simple Storage Service.
//
// Set the logging parameters for a bucket and to specify permissions for who
-// can view and modify the logging parameters. To set the logging status of
+// can view and modify the logging parameters. All logs are saved to buckets
+// in the same AWS Region as the source bucket. To set the logging status of
// a bucket, you must be the bucket owner.
//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use
+// the Grantee request element to grant access to other people. The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: a Grantee element of xsi:type="CanonicalUser" that
+// contains the grantee's canonical user ID in an ID element. A DisplayName
+// element is optional and ignored in the request.
+//
+// * By Email address: a Grantee element of xsi:type="AmazonCustomerByEmail"
+// whose EmailAddress element holds the email address of an AWS account. The
+// grantee is resolved to the CanonicalUser and, in a response to a GET Object
+// acl request, appears as the CanonicalUser.
+//
+// * By URI: a Grantee element of xsi:type="Group" whose URI element names a
+// predefined group, for example http://acs.amazonaws.com/groups/global/AuthenticatedUsers.
+//
+// To enable logging, you use LoggingEnabled and its children request elements.
+// To disable logging, you use an empty BucketLoggingStatus request element,
+// that is, a BucketLoggingStatus element with no LoggingEnabled child.
+//
+// For more information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html).
+//
+// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+// For more information about returning the logging status of a bucket, see
+// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
+//
+// The following operations are related to PutBucketLogging:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
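+//
+// A minimal sketch of enabling access logging as described above (bucket names
+// and prefix are placeholders; aws and s3 imports assumed):
+//
+//    func enableLogging(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+//            Bucket: aws.String(bucket),
+//            BucketLoggingStatus: &s3.BucketLoggingStatus{
+//                LoggingEnabled: &s3.LoggingEnabled{
+//                    TargetBucket: aws.String("log-bucket"),
+//                    TargetPrefix: aws.String(bucket + "/"),
+//                },
+//            },
+//        })
+//        return err
+//    }
+//
+//    // Sending an empty &s3.BucketLoggingStatus{} instead disables logging.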
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -5524,7 +7452,33 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
// Sets a metrics configuration (specified by the metrics configuration ID)
-// for the bucket.
+// for the bucket. You can have up to 1,000 metrics configurations per bucket.
+// If you're updating an existing metrics configuration, note that this is a
+// full replacement of the existing metrics configuration. If you don't include
+// the elements you want to keep, they are erased.
+//
+// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to PutBucketMetricsConfiguration:
+//
+// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// PutBucketMetricsConfiguration has the following special error:
+//
+// * Error code: TooManyConfigurations Description: You are attempting to
+// create a new configuration but have already reached the 1,000-configuration
+// limit. HTTP Status Code: HTTP 400 Bad Request
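+//
+// A short sketch of a metrics configuration limited to a key prefix (ID, bucket,
+// and prefix values are placeholders; aws and s3 imports assumed):
+//
+//    func putMetrics(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//            Bucket: aws.String(bucket),
+//            Id:     aws.String("documents-metrics"),
+//            MetricsConfiguration: &s3.MetricsConfiguration{
+//                Id:     aws.String("documents-metrics"),
+//                Filter: &s3.MetricsFilter{Prefix: aws.String("documents/")},
+//            },
+//        })
+//        return err
+//    }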
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5599,12 +7553,17 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
output = &PutBucketNotificationOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketNotification API operation for Amazon Simple Storage Service.
//
-// No longer used, see the PutBucketNotificationConfiguration operation.
+// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html)
+// operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5683,7 +7642,55 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
//
-// Enables notifications of specified events for a bucket.
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an
+// event notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration.
+//
+//    <NotificationConfiguration>
+//    </NotificationConfiguration>
+//
+// This operation replaces the existing notification configuration with the
+// configuration you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon
+// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
+// SQS) destination exists, and that the bucket owner has permission to publish
+// to it by sending a test notification. In the case of AWS Lambda destinations,
+// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
+// to invoke the function from the Amazon S3 bucket. For more information, see
+// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with s3:PutBucketNotification permission.
+//
+// The PUT notification is an atomic operation. For example, suppose your notification
+// configuration includes SNS topic, SQS queue, and Lambda function configurations.
+// When you send a PUT request with this configuration, Amazon S3 sends test
+// messages to your SNS topic. If the message fails, the entire PUT operation
+// will fail, and Amazon S3 will not add the configuration to your bucket.
+//
+// Responses
+//
+// If the configuration in the request body includes only one TopicConfiguration
+// specifying only the s3:ReducedRedundancyLostObject event type, the response
+// will also include the x-amz-sns-test-message-id header containing the message
+// ID of the test notification sent to the topic.
+//
+// The following operation is related to PutBucketNotificationConfiguration:
+//
+// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
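+//
+// A sketch of publishing ObjectCreated events to an SNS topic; the bucket name
+// and topic ARN are placeholders, and the topic policy must already allow Amazon
+// S3 to publish (aws and s3 imports assumed):
+//
+//    func putNotification(svc *s3.S3, bucket string) error {
+//        _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//            Bucket: aws.String(bucket),
+//            NotificationConfiguration: &s3.NotificationConfiguration{
+//                TopicConfigurations: []*s3.TopicConfiguration{{
+//                    TopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:example-topic"),
+//                    Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
+//                }},
+//            },
+//        })
+//        return err
+//    }
+//
+//    // Sending an empty &s3.NotificationConfiguration{} removes all notifications.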
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5753,12 +7760,37 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
output = &PutBucketPolicyOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketPolicy API operation for Amazon Simple Storage Service.
//
-// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using
+// an identity other than the root user of the AWS account that owns the bucket,
+// the calling identity must have the PutBucketPolicy permissions on the specified
+// bucket and belong to the bucket owner's account in order to use this operation.
+//
+// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a
+// 405 Method Not Allowed error.
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operations are related to PutBucketPolicy:
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
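+//
+// A brief sketch of attaching a bucket policy; the policy below is a placeholder
+// that allows public s3:GetObject, and the bucket name is a placeholder as well
+// (aws, fmt, and s3 imports assumed):
+//
+//    func putPolicy(svc *s3.S3, bucket string) error {
+//        policy := fmt.Sprintf(`{
+//            "Version": "2012-10-17",
+//            "Statement": [{
+//                "Effect": "Allow",
+//                "Principal": "*",
+//                "Action": "s3:GetObject",
+//                "Resource": "arn:aws:s3:::%s/*"
+//            }]
+//        }`, bucket)
+//        _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//            Bucket: aws.String(bucket),
+//            Policy: aws.String(policy),
+//        })
+//        return err
+//    }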
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5828,15 +7860,74 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
output = &PutBucketReplicationOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketReplication API operation for Amazon Simple Storage Service.
//
// Creates a replication configuration or replaces an existing one. For more
-// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
// in the Amazon S3 Developer Guide.
//
+// To perform this operation, the user or role performing the operation must
+// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html)
+// permission.
+//
+// Specify the replication configuration in the request body. In the replication
+// configuration, you provide the name of the destination bucket where you want
+// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to
+// replicate objects on your behalf, and other relevant information.
+//
+// A replication configuration must include at least one rule, and can contain
+// a maximum of 1,000. Each rule identifies a subset of objects to replicate
+// by filtering the objects in the source bucket. To choose additional subsets
+// of objects to replicate, add a rule for each subset. All rules must specify
+// the same destination bucket.
+//
+// To specify a subset of the objects in the source bucket to apply a replication
+// rule to, add the Filter element as a child of the Rule element. You can filter
+// objects based on an object key prefix, one or more object tags, or both.
+// When you add the Filter element in the configuration, you must also add the
+// following elements: DeleteMarkerReplication, Status, and Priority.
+//
+// The latest version of the replication configuration XML is V2. XML V2 replication
+// configurations are those that contain the Filter element for rules, and rules
+// that specify S3 Replication Time Control (S3 RTC). In XML V2 replication
+// configurations, Amazon S3 doesn't replicate delete markers. Therefore, you
+// must set the DeleteMarkerReplication element to Disabled. For backward compatibility,
+// Amazon S3 continues to support the XML V1 replication configuration.
+//
+// For information about enabling versioning on a bucket, see Using Versioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html).
+//
+// By default, a resource owner, in this case the AWS account that created the
+// bucket, can perform this operation. The resource owner can also grant others
+// permissions to perform the operation. For more information about permissions,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// Handling Replication of Encrypted Objects
+//
+// By default, Amazon S3 doesn't replicate objects that are stored at rest using
+// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted
+// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects,
+// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about
+// replication configuration, see Replicating Objects Created with SSE Using
+// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html).
+//
+// For information on PutBucketReplication errors, see List of replication-related
+// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList)
+//
+// The following operations are related to PutBucketReplication:
+//
+// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
+//
+// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
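+//
+// As an illustration only, a minimal V2 configuration sketch using this client
+// (assuming a usable default session, imports of the aws, session, and s3
+// packages, and placeholder bucket names and role ARN):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("source-bucket"),
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//            Rules: []*s3.ReplicationRule{{
+//                Priority: aws.Int64(1),
+//                Status:   aws.String(s3.ReplicationRuleStatusEnabled),
+//                Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
+//                DeleteMarkerReplication: &s3.DeleteMarkerReplication{
+//                    Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
+//                },
+//                Destination: &s3.Destination{
+//                    Bucket: aws.String("arn:aws:s3:::destination-bucket"),
+//                },
+//            }},
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }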
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -5905,6 +7996,10 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
output = &PutBucketRequestPaymentOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
@@ -5913,8 +8008,14 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
// Sets the request payment configuration for a bucket. By default, the bucket
// owner pays for downloads from the bucket. This configuration parameter enables
// the bucket owner (only) to specify that the person requesting the download
-// will be charged for the download. Documentation on requester pays buckets
-// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+// will be charged for the download. For more information, see Requester Pays
+// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
+//
+// The following operations are related to PutBucketRequestPayment:
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html)
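+//
+// As an illustration only, a minimal sketch of enabling Requester Pays with
+// this client (assuming a usable default session, imports of the aws, session,
+// and s3 packages, and a placeholder bucket name):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//        Bucket: aws.String("my-bucket"),
+//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//            Payer: aws.String(s3.PayerRequester),
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }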
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5984,6 +8085,10 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
output = &PutBucketTaggingOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
@@ -5991,6 +8096,47 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
//
// Sets the tags for a bucket.
//
+// Use tags to organize your AWS bill to reflect your own cost structure. To
+// do this, sign up to get your AWS account bill with tag key values included.
+// Then, to see the cost of combined resources, organize your billing information
+// according to resources with the same tag key values. For example, you can
+// tag several resources with a specific application name, and then organize
+// your billing information to see the total cost of that application across
+// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
+//
+// Within a bucket, if you add a tag that has the same key as an existing tag,
+// the new value overwrites the old value. For more information, see Using Cost
+// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html).
+//
+// To use this operation, you must have permissions to perform the s3:PutBucketTagging
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// PutBucketTagging has the following special errors:
+//
+// * Error code: InvalidTagError Description: The tag provided was not a
+// valid tag. This error can occur if the tag did not pass input validation.
+// For information about tag restrictions, see User-Defined Tag Restrictions
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
+// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
+//
+// * Error code: MalformedXMLError Description: The XML provided does not
+// match the schema.
+//
+// * Error code: OperationAbortedError Description: A conflicting conditional
+// operation is currently in progress against this resource. Please try again.
+//
+// * Error code: InternalError Description: The service was unable to apply
+// the provided tag to the bucket.
+//
+// The following operations are related to PutBucketTagging:
+//
+// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
+//
+// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
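+//
+// As an illustration only, a minimal sketch of replacing a bucket's tag set
+// with this client (assuming a usable default session, imports of the aws,
+// session, and s3 packages, and placeholder names):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//            {Key: aws.String("Project"), Value: aws.String("Phoenix")},
+//        }},
+//    })
+//    if err != nil {
+//        // handle the error
+//    }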
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6059,6 +8205,10 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
output = &PutBucketVersioningOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
@@ -6067,6 +8217,39 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
// Sets the versioning state of an existing bucket. To set the versioning state,
// you must be the bucket owner.
//
+// You can set the versioning state with one of the following values:
+//
+// Enabled—Enables versioning for the objects in the bucket. All objects added
+// to the bucket receive a unique version ID.
+//
+// Suspended—Disables versioning for the objects in the bucket. All objects
+// added to the bucket receive the version ID null.
+//
+// If the versioning state has never been set on a bucket, it has no versioning
+// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+// request does not return a versioning state value.
+//
+// If the bucket owner enables MFA Delete in the bucket versioning configuration,
+// the bucket owner must include the x-amz-mfa request header and the Status
+// and the MfaDelete request elements in a request to set the versioning state
+// of the bucket.
+//
+// If you have an object expiration lifecycle policy in your non-versioned bucket
+// and you want to maintain the same permanent delete behavior when you enable
+// versioning, you must add a noncurrent expiration policy. The noncurrent expiration
+// lifecycle policy will manage the deletes of the noncurrent object versions
+// in the version-enabled bucket. (A version-enabled bucket maintains one current
+// and zero or more noncurrent object versions.) For more information, see Lifecycle
+// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config).
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
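+//
+// As an illustration only, a minimal sketch of enabling versioning with this
+// client (assuming a usable default session, imports of the aws, session, and
+// s3 packages, and a placeholder bucket name):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String(s3.BucketVersioningStatusEnabled),
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }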
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6135,12 +8318,81 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
output = &PutBucketWebsiteOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutBucketWebsite API operation for Amazon Simple Storage Service.
//
-// Set the website configuration for a bucket.
+// Sets the configuration of the website that is specified in the website subresource.
+// To configure a bucket as a website, you can add this subresource on the bucket
+// with website configuration information such as the file name of the index
+// document and any redirect rules. For more information, see Hosting Websites
+// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This PUT operation requires the S3:PutBucketWebsite permission. By default,
+// only the bucket owner can configure the website attached to a bucket; however,
+// bucket owners can allow other users to set the website configuration by writing
+// a bucket policy that grants them the S3:PutBucketWebsite permission.
+//
+// To redirect all website requests sent to the bucket's website endpoint, you
+// add a website configuration with the following elements. Because all requests
+// are sent to another website, you don't need to provide an index document
+// name for the bucket.
+//
+// * WebsiteConfiguration
+//
+// * RedirectAllRequestsTo
+//
+// * HostName
+//
+// * Protocol
+//
+// If you want granular control over redirects, you can use the following elements
+// to add routing rules that describe conditions for redirecting requests and
+// information about the redirect destination. In this case, the website configuration
+// must provide an index document for the bucket, because some requests might
+// not be redirected.
+//
+// * WebsiteConfiguration
+//
+// * IndexDocument
+//
+// * Suffix
+//
+// * ErrorDocument
+//
+// * Key
+//
+// * RoutingRules
+//
+// * RoutingRule
+//
+// * Condition
+//
+// * HttpErrorCodeReturnedEquals
+//
+// * KeyPrefixEquals
+//
+// * Redirect
+//
+// * Protocol
+//
+// * HostName
+//
+// * ReplaceKeyPrefixWith
+//
+// * ReplaceKeyWith
+//
+// * HttpRedirectCode
+//
+// Amazon S3 has a limitation of 50 routing rules per website configuration.
+// If you require more than 50 routing rules, you can use object redirect. For
+// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+// in the Amazon Simple Storage Service Developer Guide.
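+//
+// As an illustration only, a minimal sketch of configuring an index and error
+// document with this client (assuming a usable default session, imports of the
+// aws, session, and s3 packages, and placeholder names):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("my-bucket"),
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }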
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -6214,7 +8466,71 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
// PutObject API operation for Amazon Simple Storage Service.
//
-// Adds an object to a bucket.
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to
+// add an object to it.
+//
+// Amazon S3 never adds partial objects; if you receive a success response,
+// Amazon S3 added the entire object to the bucket.
+//
+// Amazon S3 is a distributed system. If it receives multiple write requests
+// for the same object simultaneously, it overwrites all but the last object
+// written. Amazon S3 does not provide object locking; if you need this, make
+// sure to build it into your application layer or use versioning instead.
+//
+// To ensure that data is not corrupted traversing the network, use the Content-MD5
+// header. When you use this header, Amazon S3 checks the object against the
+// provided MD5 value and, if they do not match, returns an error. Additionally,
+// you can calculate the MD5 while putting an object to Amazon S3 and compare
+// the returned ETag to the calculated MD5 value.
+//
+// The Content-MD5 header is required for any request to upload an object with
+// a retention period configured using Amazon S3 Object Lock. For more information
+// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Server-side Encryption
+//
+// You can optionally request server-side encryption. With server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts the data when you access it. You have the option to provide
+// your own encryption key or use AWS managed encryption keys. For more information,
+// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// You can use headers to grant ACL-based permissions. By default, all objects
+// are private. Only the owner has full access control. When adding a new object,
+// you can grant permissions to individual AWS accounts or to predefined groups
+// defined by Amazon S3. These permissions are then added to the ACL on the
+// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+//
+// Storage Class Options
+//
+// By default, Amazon S3 uses the STANDARD storage class to store newly created
+// objects. The STANDARD storage class provides high durability and high availability.
+// Depending on performance needs, you can specify a different storage class.
+// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Versioning
+//
+// If you enable versioning for a bucket, Amazon S3 automatically generates
+// a unique version ID for the object being stored. Amazon S3 returns this ID
+// in the response. When you enable versioning for a bucket, if Amazon S3 receives
+// multiple write requests for the same object simultaneously, it stores all
+// of the objects.
+//
+// For more information about versioning, see Adding Objects to Versioning Enabled
+// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html).
+// For information about returning the versioning state of a bucket, see GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
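+//
+// As an illustration only, a minimal sketch of uploading a small object with
+// SSE-S3 encryption using this client (assuming a usable default session and
+// imports of the aws, session, s3, and bytes packages; the bucket and key are
+// placeholders):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObject(&s3.PutObjectInput{
+//        Bucket:               aws.String("my-bucket"),
+//        Key:                  aws.String("greetings/hello.txt"),
+//        Body:                 bytes.NewReader([]byte("hello world")),
+//        ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
+//    })
+//    if err != nil {
+//        // handle the error
+//    }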
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -6283,13 +8599,97 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
output = &PutObjectAclOutput{}
req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutObjectAcl API operation for Amazon Simple Storage Service.
//
-// uses the acl subresource to set the access control list (ACL) permissions
-// for an object that already exists in a bucket
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket. You must have WRITE_ACP permission
+// to set the ACL of an object.
+//
+// Depending on your application needs, you can choose to set the ACL on an
+// object using either the request body or the headers. For example, if you
+// have an existing application that updates a bucket ACL using the request
+// body, you can continue to use that approach. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// in the Amazon S3 Developer Guide.
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (AWS
+// accounts or Amazon S3 groups) who will receive the permission. If you
+// use these ACL-specific headers, you cannot use x-amz-acl header to set
+// a canned ACL. These parameters map to the set of permissions that Amazon
+// S3 supports in an ACL. For more information, see Access Control List (ACL)
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants list objects permission to the two AWS accounts identified
+// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com",
+// emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: specify the canonical user ID of the AWS account in
+// the ID element of the Grantee element. DisplayName is optional and ignored
+// in the request.
+//
+// * By URI: specify the URI of the predefined Amazon S3 group in the URI
+// element of the Grantee element, for example
+// http://acs.amazonaws.com/groups/global/AuthenticatedUsers.
+//
+// * By Email address: specify the email address of the AWS account in the
+// EmailAddress element of the Grantee element.
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Versioning
+//
+// The ACL of an object is set at the object version level. By default, PUT
+// sets the ACL of the current version of an object. To set the ACL of a different
+// version, use the versionId subresource.
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
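+//
+// As an illustration only, a minimal sketch of applying a canned ACL with this
+// client (assuming a usable default session, imports of the aws, session, and
+// s3 packages, and placeholder names):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("greetings/hello.txt"),
+//        ACL:    aws.String(s3.ObjectCannedACLPublicRead),
+//    })
+//    if err != nil {
+//        // handle the error
+//    }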
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -6363,6 +8763,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req
output = &PutObjectLegalHoldOutput{}
req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
@@ -6370,6 +8774,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req
//
// Applies a Legal Hold configuration to the specified object.
//
+// Related Resources
+//
+// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6437,15 +8845,26 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration
output = &PutObjectLockConfigurationOutput{}
req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutObjectLockConfiguration API operation for Amazon Simple Storage Service.
//
-// Places an object lock configuration on the specified bucket. The rule specified
-// in the object lock configuration will be applied by default to every new
+// Places an Object Lock configuration on the specified bucket. The rule specified
+// in the Object Lock configuration will be applied by default to every new
// object placed in the specified bucket.
//
+// DefaultRetention requires either Days or Years. You can't specify both at
+// the same time.
+//
+// Related Resources
+//
+// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
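+//
+// As an illustration only, a minimal sketch of setting a 30-day GOVERNANCE
+// default retention with this client (assuming a usable default session,
+// imports of the aws, session, and s3 packages, and a placeholder bucket that
+// was created with Object Lock enabled):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//        Bucket: aws.String("my-locked-bucket"),
+//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//            ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
+//            Rule: &s3.ObjectLockRule{
+//                DefaultRetention: &s3.DefaultRetention{
+//                    Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
+//                    Days: aws.Int64(30),
+//                },
+//            },
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }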
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6513,6 +8932,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req
output = &PutObjectRetentionOutput{}
req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
@@ -6520,6 +8943,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req
//
// Places an Object Retention configuration on an object.
//
+// Related Resources
+//
+// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6587,12 +9014,53 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
output = &PutObjectTaggingOutput{}
req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutObjectTagging API operation for Amazon Simple Storage Service.
//
-// Sets the supplied tag-set to an object that already exists in a bucket
+// Sets the supplied tag-set to an object that already exists in a bucket.
+//
+// A tag is a key-value pair. You can associate tags with an object by sending
+// a PUT request against the tagging subresource that is associated with the
+// object. You can retrieve tags by sending a GET request. For more information,
+// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
+//
+// For tagging-related restrictions related to characters and encodings, see
+// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+//
+// To use this operation, you must have permission to perform the s3:PutObjectTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others.
+//
+// To put tags of any other version, use the versionId query parameter. You
+// also need permission for the s3:PutObjectVersionTagging action.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// Special Errors
+//
+// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This
+// error can occur if the tag did not pass input validation. For more information,
+// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// * Code: MalformedXMLError Cause: The XML provided does not match the schema.
+//
+// * Code: OperationAbortedError Cause: A conflicting conditional operation
+// is currently in progress against this resource. Please try again.
+//
+// * Code: InternalError Cause: The service was unable to apply the provided
+// tag to the object.
+//
+// Related Resources
+//
+// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
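+//
+// As an illustration only, a minimal sketch of tagging an existing object with
+// this client (assuming a usable default session, imports of the aws, session,
+// and s3 packages, and placeholder names):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("greetings/hello.txt"),
+//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//            {Key: aws.String("Classification"), Value: aws.String("public")},
+//        }},
+//    })
+//    if err != nil {
+//        // handle the error
+//    }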
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -6662,13 +9130,39 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req
output = &PutPublicAccessBlockOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
return
}
// PutPublicAccessBlock API operation for Amazon Simple Storage Service.
//
// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
-// bucket.
+// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
+// permission. For more information about Amazon S3 permissions, see Specifying
+// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket
+// or an object, it checks the PublicAccessBlock configuration for both the
+// bucket (or the bucket that contains the object) and the bucket owner's account.
+// If the PublicAccessBlock configurations are different between the bucket
+// and the account, Amazon S3 uses the most restrictive combination of the bucket-level
+// and account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+//
+// Related Resources
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+//
+// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
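+//
+// As an illustration only, a minimal sketch of blocking all public access on a
+// bucket with this client (assuming a usable default session, imports of the
+// aws, session, and s3 packages, and a placeholder bucket name):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"),
+//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//            BlockPublicAcls:       aws.Bool(true),
+//            BlockPublicPolicy:     aws.Bool(true),
+//            IgnorePublicAcls:      aws.Bool(true),
+//            RestrictPublicBuckets: aws.Bool(true),
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }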
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -6744,6 +9238,192 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
//
// Restores an archived copy of an object back into Amazon S3
//
+// This operation performs the following types of requests:
+//
+// * select - Perform a select query on an archived object
+//
+// * restore an archive - Restore an archived object
+//
+// To use this operation, you must have permissions to perform the s3:RestoreObject
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Querying Archives with Select Requests
+//
+// You use a select type of request to perform SQL queries on archived objects.
+// The archived objects that are being queried by the select request must be
+// formatted as uncompressed comma-separated values (CSV) files. You can run
+// queries and custom analytics on your archived data without having to restore
+// your data to a hotter Amazon S3 tier. For an overview about select requests,
+// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// When making a select request, do the following:
+//
+// * Define an output location for the select query's output. This must be
+// an Amazon S3 bucket in the same AWS Region as the bucket that contains
+// the archive object that is being queried. The AWS account that initiates
+// the job must have permissions to write to the S3 bucket. You can specify
+// the storage class and encryption for the output objects stored in the
+// bucket. For more information about output, see Querying Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon Simple Storage Service Developer Guide. For more information
+// about the S3 structure in the request body, see the following: PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing
+// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+// in the Amazon Simple Storage Service Developer Guide Protecting Data Using
+// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon Simple Storage Service Developer Guide
+//
+// * Define the SQL expression for the SELECT type of restoration for your
+// query in the request body's SelectParameters structure. You can use expressions
+// like the following examples. The following expression returns all records
+// from the specified object. SELECT * FROM Object Assuming that you are
+// not using any headers for data stored in the object, you can specify columns
+// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 >
+// 100 If you have headers and you set the fileHeaderInfo in the CSV structure
+// in the request body to USE, you can specify headers in the query. (If
+// you set the fileHeaderInfo field to IGNORE, the first row is skipped for
+// the query.) You cannot mix ordinal positions with header column names.
+// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
+//
+// For more information about using SQL with S3 Glacier Select restore, see
+// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// When making a select request, you can also do the following:
+//
+// * To expedite your queries, specify the Expedited tier. For more information
+// about tiers, see "Restoring Archives," later in this topic.
+//
+// * Specify details about the data serialization format of both the input
+// object that is being queried and the serialization of the CSV-encoded
+// query results.
+//
+// The following are additional important facts about the select feature:
+//
+// * The output results are new Amazon S3 objects. Unlike archive retrievals,
+// they are stored until explicitly deleted, either manually or through a
+// lifecycle policy.
+//
+// * You can issue more than one select request on the same Amazon S3 object.
+// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
+//
+// * Amazon S3 accepts a select request even if the object has already been
+// restored. A select request doesn’t return error response 409.
+//
+// Restoring Archives
+//
+// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To
+// access an archived object, you must first initiate a restore request. This
+// restores a temporary copy of the archived object. In a restore request, you
+// specify the number of days that you want the restored copy to exist. After
+// the specified period, Amazon S3 deletes the temporary copy but the object
+// remains archived in the GLACIER or DEEP_ARCHIVE storage class from which
+// it was restored.
+//
+// To restore a specific object version, you can provide a version ID. If you
+// don't provide a version ID, Amazon S3 restores the current version.
+//
+// The time it takes restore jobs to finish depends on which storage class the
+// object is being restored from and which data access tier you specify.
+//
+// When restoring an archived object (or using a select request), you can specify
+// one of the following data access tier options in the Tier element of the
+// request body:
+//
+// * Expedited - Expedited retrievals allow you to quickly access your data
+// stored in the GLACIER storage class when occasional urgent requests for
+// a subset of archives are required. For all but the largest archived objects
+// (250 MB+), data accessed using Expedited retrievals is typically made
+// available within 1–5 minutes. Provisioned capacity ensures that retrieval
+// capacity for Expedited retrievals is available when you need it. Expedited
+// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE
+// storage class.
+//
+// * Standard - S3 Standard retrievals allow you to access any of your archived
+// objects within several hours. This is the default option for the GLACIER
+// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval
+// option. S3 Standard retrievals typically complete within 3-5 hours from
+// the GLACIER storage class and typically complete within 12 hours from
+// the DEEP_ARCHIVE storage class.
+//
+// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval
+// option, enabling you to retrieve large amounts, even petabytes, of data
+// inexpensively in a day. Bulk retrievals typically complete within 5-12
+// hours from the GLACIER storage class and typically complete within 48
+// hours from the DEEP_ARCHIVE storage class.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change the restore speed to
+// a faster speed while it is in progress. You upgrade the speed of an in-progress
+// restoration by issuing another restore request to the same object, setting
+// a new Tier request element. When issuing a request to upgrade the restore
+// tier, you must choose a tier that is faster than the tier that the in-progress
+// restore is using. You must not change any other parameters, such as the Days
+// request element. For more information, see Upgrading the Speed of an In-Progress
+// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// To get the status of object restoration, you can send a HEAD request. Operations
+// return the x-amz-restore header, which provides information about the restoration
+// status, in the response. You can use Amazon S3 event notifications to notify
+// you when a restore is initiated or completed. For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period when
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Responses
+//
+// A successful operation returns either the 200 OK or 202 Accepted status code.
+//
+// * If the object copy is not previously restored, then Amazon S3 returns
+// 202 Accepted in the response.
+//
+// * If the object copy is previously restored, Amazon S3 returns 200 OK
+// in the response.
+//
+// Special Errors
+//
+// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress.
+// (This error does not apply to SELECT type requests.) HTTP Status Code:
+// 409 Conflict SOAP Fault Code Prefix: Client
+//
+// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited
+// retrievals are currently not available. Try again later. (Returned if
+// there is insufficient capacity to process the Expedited request. This
+// error applies only to Expedited retrievals and not to S3 Standard or Bulk
+// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A
+//
+// Related Resources
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide
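+//
+// As an illustration only, a minimal sketch of restoring an archived object for
+// two days using the Standard tier with this client (assuming a usable default
+// session, imports of the aws, session, and s3 packages, and placeholder names):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("archive/2019-12.csv"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(2),
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String(s3.TierStandard),
+//            },
+//        },
+//    })
+//    if err != nil {
+//        // handle the error, e.g. ErrCodeObjectAlreadyInActiveTierError
+//    }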
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6753,7 +9433,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
//
// Returned Error Codes:
// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
-// This operation is not allowed against this storage tier
+// This operation is not allowed against this storage tier.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
@@ -6816,20 +9496,106 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
output = &SelectObjectContentOutput{}
req = c.newRequest(op, input, output)
+
+ es := newSelectObjectContentEventStream()
+ req.Handlers.Unmarshal.PushBack(es.setStreamCloser)
+ output.EventStream = es
+
req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop)
+ req.Handlers.Unmarshal.PushBack(es.runOutputStream)
+ req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose)
return
}
// SelectObjectContent API operation for Amazon Simple Storage Service.
//
// This operation filters the contents of an Amazon S3 object based on a simple
-// Structured Query Language (SQL) statement. In the request, along with the
-// SQL expression, you must also specify a data serialization format (JSON or
-// CSV) of the object. Amazon S3 uses this to parse object data into records,
-// and returns only records that match the specified SQL expression. You must
-// also specify the data serialization format for the response.
+// structured query language (SQL) statement. In the request, along with the
+// SQL expression, you must also specify a data serialization format (JSON,
+// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse
+// object data into records, and returns only records that match the specified
+// SQL expression. You must also specify the data serialization format for the
+// response.
+//
+// For more information about Amazon S3 Select, see Selecting Content from Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// For more information about using SQL with Amazon S3 Select, see SQL Reference
+// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Permissions
+//
+// You must have s3:GetObject permission for this operation. Amazon S3 Select
+// does not support anonymous access. For more information about permissions,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Object Data Formats
+//
+// You can use Amazon S3 Select to query objects that have the following format
+// properties:
+//
+// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
+//
+// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
+//
+// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2.
+// GZIP and BZIP2 are the only compression formats that Amazon S3 Select
+// supports for CSV and JSON files. Amazon S3 Select supports columnar compression
+// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object
+// compression for Parquet objects.
+//
+// * Server-side encryption - Amazon S3 Select supports querying objects
+// that are protected with server-side encryption. For objects that are encrypted
+// with customer-provided encryption keys (SSE-C), you must use HTTPS, and
+// you must use the headers that are documented in GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon Simple Storage Service Developer Guide. For objects that
+// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer
+// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side
+// encryption is handled transparently, so you don't need to specify anything.
+// For more information about server-side encryption, including SSE-S3 and
+// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Working with the Response Body
+//
+// Given the response size is unknown, Amazon S3 Select streams the response
+// as a series of messages and includes a Transfer-Encoding header with chunked
+// as its value in the response. For more information, see Appendix: SelectObjectContent
+// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html).
+//
+// GetObject Support
+//
+// The SelectObjectContent operation does not support the following GetObject
+// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
+//
+// * Range: Although you can specify a scan range for an Amazon S3 Select
+// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange)
+// in the request parameters), you cannot specify the range of bytes of an
+// object to return.
+//
+// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot
+// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes.
+// For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Special Errors
+//
+// For a list of special errors for this operation, see List of SELECT Object
+// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList)
+//
+// Related Resources
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
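+//
+// As an illustration only, a sketch of a function body that queries a CSV
+// object and prints matching records with this client (assuming a usable
+// default session, imports of the aws, session, s3, and fmt packages,
+// placeholder names, and an enclosing function that returns error):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//        Bucket:         aws.String("my-bucket"),
+//        Key:            aws.String("data/report.csv"),
+//        ExpressionType: aws.String(s3.ExpressionTypeSql),
+//        Expression:     aws.String("SELECT s.Id FROM S3Object s WHERE s.Region = 'EU'"),
+//        InputSerialization: &s3.InputSerialization{
+//            CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
+//        },
+//        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    defer out.EventStream.Close()
+//    for ev := range out.EventStream.Events() {
+//        if records, ok := ev.(*s3.RecordsEvent); ok {
+//            fmt.Printf("%s", records.Payload)
+//        }
+//    }
+//    return out.EventStream.Err()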
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -6859,11 +9625,155 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject
return out, req.Send()
}
-const opUploadPart = "UploadPart"
+var _ awserr.Error
-// UploadPartRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPart operation. The "output" return
-// value will be populated with the request's response once the request completes
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
+type SelectObjectContentEventStream struct {
+
+ // Reader is the EventStream reader for the SelectObjectContentEventStream
+ // events. This value is automatically set by the SDK when the API call is made.
+ // Use this member when unit testing your code with the SDK to mock out the
+ // EventStream Reader.
+ //
+ // Must not be nil.
+ Reader SelectObjectContentEventStreamReader
+
+ outputReader io.ReadCloser
+
+ // StreamCloser is the io.Closer for the EventStream connection. For HTTP
+ // EventStream this is the response Body. The stream will be closed when
+ // the Close method of the EventStream is called.
+ StreamCloser io.Closer
+
+ done chan struct{}
+ closeOnce sync.Once
+ err *eventstreamapi.OnceError
+}
+
+func newSelectObjectContentEventStream() *SelectObjectContentEventStream {
+ return &SelectObjectContentEventStream{
+ done: make(chan struct{}),
+ err: eventstreamapi.NewOnceError(),
+ }
+}
+
+func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) {
+ es.StreamCloser = r.HTTPResponse.Body
+}
+
+func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) {
+ if es.done == nil {
+ return
+ }
+ go es.waitStreamPartClose()
+
+}
+
+func (es *SelectObjectContentEventStream) waitStreamPartClose() {
+ var outputErrCh <-chan struct{}
+ if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok {
+ outputErrCh = v.ErrorSet()
+ }
+ var outputClosedCh <-chan struct{}
+ if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
+ outputClosedCh = v.Closed()
+ }
+
+ select {
+ case <-es.done:
+ case <-outputErrCh:
+ es.err.SetError(es.Reader.Err())
+ es.Close()
+ case <-outputClosedCh:
+ if err := es.Reader.Err(); err != nil {
+ es.err.SetError(es.Reader.Err())
+ }
+ es.Close()
+ }
+}
+
+// Events returns a channel to read events from.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+// * SelectObjectContentEventStreamUnknownEvent
+func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+ return es.Reader.Events()
+}
+
+func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) {
+ var opts []func(*eventstream.Decoder)
+ if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) {
+ opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger))
+ }
+
+ unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{
+ metadata: protocol.ResponseMetadata{
+ StatusCode: r.HTTPResponse.StatusCode,
+ RequestID: r.RequestID,
+ },
+ }.UnmarshalerForEventName
+
+ decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...)
+ eventReader := eventstreamapi.NewEventReader(decoder,
+ protocol.HandlerPayloadUnmarshal{
+ Unmarshalers: r.Handlers.UnmarshalStream,
+ },
+ unmarshalerForEvent,
+ )
+
+ es.outputReader = r.HTTPResponse.Body
+ es.Reader = newReadSelectObjectContentEventStream(eventReader)
+}
+
+// Close closes the stream, including the underlying HTTP response body.
+// Close must be called when done using the stream API. Not calling Close
+// may result in resource leaks.
+//
+// You can use the closing of the Reader's Events channel to terminate your
+// application's read from the API's stream.
+//
+func (es *SelectObjectContentEventStream) Close() (err error) {
+ es.closeOnce.Do(es.safeClose)
+ return es.Err()
+}
+
+func (es *SelectObjectContentEventStream) safeClose() {
+ if es.done != nil {
+ close(es.done)
+ }
+
+ es.Reader.Close()
+ if es.outputReader != nil {
+ es.outputReader.Close()
+ }
+
+ es.StreamCloser.Close()
+}
+
+// Err returns any error that occurred while reading or writing EventStream
+// Events from the service API's response. Returns nil if there were no errors.
+func (es *SelectObjectContentEventStream) Err() error {
+ if err := es.err.Err(); err != nil {
+ return err
+ }
+ if err := es.Reader.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
@@ -6905,12 +9815,88 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
//
// Uploads a part in a multipart upload.
//
+// In this operation, you provide part data in your request. However, you have
+// an option to specify your existing Amazon S3 object as a data source for
+// the part you are uploading. To upload a part from an existing object, you
+// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation.
+//
+// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier, that you must include in your
+// upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object
+// being created. If you upload a new part using the same part number that was
+// used with a previous part, the previously uploaded part is overwritten. Each
+// part must be at least 5 MB in size, except the last part. There is no size
+// limit on the last part of your multipart upload.
+//
+// To ensure that data is not corrupted when traversing the network, specify
+// the Content-MD5 header in the upload part request. Amazon S3 checks the part
+// data against the provided MD5 value. If they do not match, Amazon S3 returns
+// an error.
+//
// Note: After you initiate multipart upload and upload one or more parts, you
// must either complete or abort multipart upload in order to stop getting charged
// for storage of the uploaded parts. Only after you either complete or abort
// multipart upload, Amazon S3 frees up the parts storage and stops charging
// you for the parts storage.
//
+// For more information on multipart uploads, go to Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
+// Amazon Simple Storage Service Developer Guide.
+//
+// For information on the permissions required to use the multipart upload API,
+// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// You can optionally request server-side encryption where Amazon S3 encrypts
+// your data as it writes it to disks in its data centers and decrypts it for
+// you when you access it. You have the option of providing your own encryption
+// key, or you can use the AWS managed encryption keys. If you choose to provide
+// your own encryption key, the request headers you provide in the request must
+// match the headers you used in the request to initiate the upload by using
+// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
+// you are using a customer-provided encryption key, you don't need to specify
+// the encryption parameters in each UploadPart request. Instead, you only need
+// to specify the server-side encryption parameters in the initial Initiate
+// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+//
+// If you requested server-side encryption using a customer-provided encryption
+// key in your initiate multipart upload request, you must provide identical
+// encryption information in each part upload using the following headers.
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// Special Errors
+//
+// * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+// The upload ID might be invalid, or the multipart upload might have been
+// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code
+// Prefix: Client
+//
+// Related Resources
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
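// A minimal sketch of how the UploadPart call described above might be made with this
// SDK version. The bucket name, key, and upload ID are placeholders; the Content-MD5
// header is derived from the part bytes, as the documentation recommends.
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	part := bytes.Repeat([]byte("x"), 5*1024*1024) // parts other than the last must be at least 5 MB
	sum := md5.Sum(part)

	out, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("my-bucket"),         // placeholder
		Key:        aws.String("my-object"),         // placeholder
		UploadId:   aws.String("example-upload-id"), // returned by CreateMultipartUpload
		PartNumber: aws.Int64(1),                    // 1-10,000; also fixes the part's position
		Body:       bytes.NewReader(part),
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])), // integrity check
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("part 1 ETag:", aws.StringValue(out.ETag)) // keep the ETag for CompleteMultipartUpload
}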
@@ -6983,7 +9969,95 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
// UploadPartCopy API operation for Amazon Simple Storage Service.
//
-// Uploads a part by copying data from an existing object as data source.
+// Uploads a part by copying data from an existing object as data source. You
+// specify the data source by adding the request header x-amz-copy-source in
+// your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// The minimum allowable part size for a multipart upload is 5 MB. For more
+// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+// * For conceptual information about multipart uploads, see Uploading Objects
+// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// * For information about permissions required to use the multipart upload
+// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// * For information about copying objects using a single atomic operation
+// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// * For information about using server-side encryption with customer-provided
+// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
+// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request, and the x-amz-copy-source-if-match
+// condition evaluates to true while the x-amz-copy-source-if-unmodified-since
+// condition evaluates to false, Amazon S3 returns 200 OK and copies the
+// data.
+//
+// * Consideration 2 - If both the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request,
+// and the x-amz-copy-source-if-none-match condition evaluates to false
+// while the x-amz-copy-source-if-modified-since condition evaluates to true,
+// Amazon S3 returns a 412 Precondition Failed response code.
+//
+// Versioning
+//
+// If your bucket has versioning enabled, you could have multiple versions of
+// the same object. By default, x-amz-copy-source identifies the current version
+// of the object to copy. If the current version is a delete marker and you
+// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404
+// error, because the object does not exist. If you specify versionId in the
+// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+// an HTTP 400 error, because you are not allowed to specify a delete marker
+// as a version for the x-amz-copy-source.
+//
+// You can optionally specify a specific version of the source object to copy
+// by adding the versionId subresource as shown in the following example:
+//
+// x-amz-copy-source: /bucket/object?versionId=version id
+//
+// Special Errors
+//
+// * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+// The upload ID might be invalid, or the multipart upload might have been
+// aborted or completed. HTTP Status Code: 404 Not Found
+//
+// * Code: InvalidRequest Cause: The specified copy source is not supported
+// as a byte-range copy source. HTTP Status Code: 400 Bad Request
+//
+// Related Resources
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
//
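// A minimal sketch of the UploadPartCopy request described above. The source and
// destination names, upload ID, and byte range are placeholders; CopySource carries
// x-amz-copy-source and CopySourceRange carries x-amz-copy-source-range.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("destination-bucket"),          // placeholder
		Key:             aws.String("destination-object"),          // placeholder
		UploadId:        aws.String("example-upload-id"),           // returned by CreateMultipartUpload
		PartNumber:      aws.Int64(2),
		CopySource:      aws.String("source-bucket/source-object"), // URL-encode the key portion if needed
		CopySourceRange: aws.String("bytes=0-5242879"),             // first 5 MB of the source object
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("copied part ETag:", aws.StringValue(out.CopyPartResult.ETag))
}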
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -7045,7 +10119,14 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
type AbortMultipartUploadInput struct {
_ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"`
- // Name of the bucket to which the multipart upload was initiated.
+ // The bucket name to which the upload was taking place.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -7055,10 +10136,11 @@ type AbortMultipartUploadInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Upload ID that identifies the multipart upload.
@@ -7133,6 +10215,20 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI
return s
}
+func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *AbortMultipartUploadInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
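// A minimal sketch of aborting a multipart upload through an access point, as the
// Bucket documentation above describes. The account ID, access point name, key, and
// upload ID are placeholders; the ARN is passed in place of the bucket name and is
// detected by the hasEndpointARN/getEndpointARN helpers added in this change.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		// Access point ARN used in place of the bucket name (placeholder values).
		Bucket:   aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:      aws.String("my-object"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
}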
type AbortMultipartUploadOutput struct {
_ struct{} `type:"structure"`
@@ -7335,9 +10431,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
// Specifies the configuration and any analyses for the analytics filter of
// an Amazon S3 bucket.
-//
-// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html)
-// in the Amazon Simple Storage Service API Reference.
type AnalyticsConfiguration struct {
_ struct{} `type:"structure"`
@@ -7456,6 +10549,9 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket
return s
}
+// The filter used to describe a set of objects for analyses. A filter must
+// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).
+// If no filter is provided, all objects will be considered in any analysis.
type AnalyticsFilter struct {
_ struct{} `type:"structure"`
@@ -7518,6 +10614,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
return s
}
+// Contains information about where to publish the analytics results.
type AnalyticsS3BucketDestination struct {
_ struct{} `type:"structure"`
@@ -7526,8 +10623,11 @@ type AnalyticsS3BucketDestination struct {
// Bucket is a required field
Bucket *string `type:"string" required:"true"`
- // The account ID that owns the destination bucket. If no account ID is provided,
- // the owner will not be validated prior to exporting data.
+ // The account ID that owns the destination S3 bucket. If no account ID is provided,
+ // the owner is not validated before exporting data.
+ //
+ // Although this value is optional, we strongly recommend that you set it to
+ // help prevent problems if the destination bucket ownership changes.
BucketAccountId *string `type:"string"`
// Specifies the file format used when exporting data to Amazon S3.
@@ -7596,6 +10696,8 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes
return s
}
+// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name
+// is globally unique, and the namespace is shared by all AWS accounts.
type Bucket struct {
_ struct{} `type:"structure"`
@@ -7679,6 +10781,7 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec
return s
}
+// Container for logging status information.
type BucketLoggingStatus struct {
_ struct{} `type:"structure"`
@@ -7727,7 +10830,8 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin
type CORSConfiguration struct {
_ struct{} `type:"structure"`
- // A set of allowed origins and methods.
+ // A set of origins and methods (cross-origin access that you want to allow).
+ // You can add up to 100 rules to the configuration.
//
// CORSRules is a required field
CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
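// A minimal sketch of setting the CORS configuration described above via PutBucketCors.
// The bucket name and origin are placeholders; up to 100 CORSRule entries may be
// supplied in CORSRules.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: aws.StringSlice([]string{"https://example.com"}),
				AllowedMethods: aws.StringSlice([]string{"GET", "PUT"}),
				AllowedHeaders: aws.StringSlice([]string{"*"}),
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}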
@@ -7859,7 +10963,8 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
return s
}
-// Describes how a CSV-formatted input object is formatted.
+// Describes how an uncompressed comma-separated values (CSV)-formatted input
+// object is formatted.
type CSVInput struct {
_ struct{} `type:"structure"`
@@ -7868,24 +10973,45 @@ type CSVInput struct {
// to TRUE may lower performance.
AllowQuotedRecordDelimiter *bool `type:"boolean"`
- // The single character used to indicate a row should be ignored when present
- // at the start of a row.
+ // A single character used to indicate that a row should be ignored when the
+ // character is present at the start of that row. You can specify any character
+ // to indicate a comment line.
Comments *string `type:"string"`
- // The value used to separate individual fields in a record.
+ // A single character used to separate individual fields in a record. You can
+ // specify an arbitrary delimiter.
FieldDelimiter *string `type:"string"`
- // Describes the first line of input. Valid values: None, Ignore, Use.
+ // Describes the first line of input. Valid values are:
+ //
+ // * NONE: First line is not a header.
+ //
+ // * IGNORE: First line is a header, but you can't use the header values
+ // to indicate the column in an expression. You can use column position (such
+ // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).
+ //
+ // * USE: First line is a header, and you can use the header value to identify
+ // a column in an expression (SELECT "name" FROM OBJECT).
FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
- // Value used for escaping where the field delimiter is part of the value.
+ // A single character used for escaping when the field delimiter is part of
+ // the value. For example, if the value is a, b, Amazon S3 wraps this field
+ // value in quotation marks, as follows: " a , b ".
+ //
+ // Type: String
+ //
+ // Default: "
+ //
+ // Ancestors: CSV
QuoteCharacter *string `type:"string"`
- // The single character used for escaping the quote character inside an already
- // escaped value.
+ // A single character used for escaping the quotation mark character inside
+ // an already escaped value. For example, the value """ a , b """ is parsed
+ // as " a , b ".
QuoteEscapeCharacter *string `type:"string"`
- // The value used to separate individual records.
+ // A single character used to separate individual records in the input. Instead
+ // of the default value, you can specify an arbitrary delimiter.
RecordDelimiter *string `type:"string"`
}
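// A minimal sketch of how the CSVInput options documented above are typically passed to
// SelectObjectContent. The bucket, key, and expression are placeholders; FileHeaderInfo
// USE lets the query reference columns by header name.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("my-bucket"), // placeholder
		Key:            aws.String("data.csv"),  // placeholder
		Expression:     aws.String(`SELECT s."name" FROM S3Object s`),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{
				FileHeaderInfo:  aws.String(s3.FileHeaderInfoUse), // first line is a usable header
				FieldDelimiter:  aws.String(","),
				RecordDelimiter: aws.String("\n"),
			},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// Print the matching records as they arrive on the event stream.
	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}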
@@ -7941,24 +11067,33 @@ func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput {
return s
}
-// Describes how CSV-formatted results are formatted.
+// Describes how uncompressed comma-separated values (CSV)-formatted results
+// are formatted.
type CSVOutput struct {
_ struct{} `type:"structure"`
- // The value used to separate individual fields in a record.
+ // The value used to separate individual fields in a record. You can specify
+ // an arbitrary delimiter.
FieldDelimiter *string `type:"string"`
- // The value used for escaping where the field delimiter is part of the value.
+ // A single character used for escaping when the field delimiter is part of
+ // the value. For example, if the value is a, b, Amazon S3 wraps this field
+ // value in quotation marks, as follows: " a , b ".
QuoteCharacter *string `type:"string"`
- // Th single character used for escaping the quote character inside an already
+ // The single character used for escaping the quote character inside an already
// escaped value.
QuoteEscapeCharacter *string `type:"string"`
- // Indicates whether or not all output fields should be quoted.
+ // Indicates whether to use quotation marks around output fields.
+ //
+ // * ALWAYS: Always use quotation marks for output fields.
+ //
+ // * ASNEEDED: Use quotation marks for output fields when needed.
QuoteFields *string `type:"string" enum:"QuoteFields"`
- // The value used to separate individual records.
+ // A single character used to separate individual records in the output. Instead
+ // of the default value, you can specify an arbitrary delimiter.
RecordDelimiter *string `type:"string"`
}
@@ -8002,9 +11137,12 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput {
return s
}
+// Container for specifying the AWS Lambda notification configuration.
type CloudFunctionConfiguration struct {
_ struct{} `type:"structure"`
+ // Lambda cloud function ARN that Amazon S3 can invoke when it detects events
+ // of the specified type.
CloudFunction *string `type:"string"`
// The bucket event for which to send notifications.
@@ -8012,12 +11150,14 @@ type CloudFunctionConfiguration struct {
// Deprecated: Event has been deprecated
Event *string `deprecated:"true" type:"string" enum:"Event"`
+ // Bucket events for which to send notifications.
Events []*string `locationName:"Event" type:"list" flattened:"true"`
// An optional unique identifier for configurations in a notification configuration.
// If you don't provide one, Amazon S3 will assign an ID.
Id *string `type:"string"`
+ // The role supporting the invocation of the Lambda function.
InvocationRole *string `type:"string"`
}
@@ -8061,9 +11201,15 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC
return s
}
+// Container for all (if there are any) keys between Prefix and the next occurrence
+// of the string specified by a delimiter. CommonPrefixes lists keys that act
+// like subdirectories in the directory specified by Prefix. For example, if
+// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july,
+// the common prefix is notes/summer/.
type CommonPrefix struct {
_ struct{} `type:"structure"`
+ // Container for the specified common prefix.
Prefix *string `type:"string"`
}
@@ -8086,20 +11232,28 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
type CompleteMultipartUploadInput struct {
_ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"`
+ // Name of the bucket to which the multipart upload was initiated.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Object key for which the multipart upload was initiated.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+ // The container for the multipart upload request information.
MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+ // ID for the initiated multipart upload.
+ //
// UploadId is a required field
UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
}
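// A minimal sketch of completing an upload with the CompletedMultipartUpload container
// documented above. The bucket, key, upload ID, and ETags are placeholders; each
// CompletedPart pairs a part number with the ETag returned by UploadPart.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),         // placeholder
		Key:      aws.String("my-object"),         // placeholder
		UploadId: aws.String("example-upload-id"), // returned by CreateMultipartUpload
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{PartNumber: aws.Int64(1), ETag: aws.String(`"etag-of-part-1"`)}, // placeholder ETags
				{PartNumber: aws.Int64(2), ETag: aws.String(`"etag-of-part-2"`)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assembled object at:", aws.StringValue(out.Location))
}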
@@ -8176,35 +11330,61 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU
return s
}
+func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *CompleteMultipartUploadInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type CompleteMultipartUploadOutput struct {
_ struct{} `type:"structure"`
+ // The name of the bucket that contains the newly created object.
Bucket *string `type:"string"`
- // Entity tag of the object.
+ // Entity tag that identifies the newly created object's data. Objects with
+ // different object data will have different entity tags. The entity tag is
+ // an opaque string. The entity tag may or may not be an MD5 digest of the object
+ // data. If the entity tag is not an MD5 digest of the object data, it will
+ // contain one or more nonhexadecimal characters and/or will consist of fewer
+ // than 32 or more than 32 hexadecimal digits.
ETag *string `type:"string"`
// If the object expiration is configured, this will contain the expiration
// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+ // The object key of the newly created object.
Key *string `min:"1" type:"string"`
+ // The URI that identifies the newly created object.
Location *string `type:"string"`
// If present, indicates that the requester was successfully charged for the
// request.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // If you specified server-side encryption either with an Amazon S3-managed
+ // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart
+ // upload request, the response includes this header. It confirms the encryption
+ // algorithm that Amazon S3 used to encrypt the object.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
- // Version of the object.
+ // Version ID of the newly created object, in case the bucket has versioning
+ // turned on.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
@@ -8279,9 +11459,11 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar
return s
}
+// The container for the completed multipart upload details.
type CompletedMultipartUpload struct {
_ struct{} `type:"structure"`
+ // Array of CompletedPart data types.
Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
}
@@ -8301,6 +11483,7 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip
return s
}
+// Details of the parts that were uploaded.
type CompletedPart struct {
_ struct{} `type:"structure"`
@@ -8334,7 +11517,10 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
return s
}
-// Specifies a condition that must be met for a redirect to apply.
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example: 1. If the request is for pages in the /docs folder,
+// redirect to the /documents folder. 2. If the request results in an HTTP 4xx error,
+// redirect the request to another host where you might process the error.
type Condition struct {
_ struct{} `type:"structure"`
@@ -8403,12 +11589,21 @@ func (s *ContinuationEvent) UnmarshalEvent(
return nil
}
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ return msg, err
+}
+
type CopyObjectInput struct {
_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+ // The name of the destination bucket.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -8448,7 +11643,8 @@ type CopyObjectInput struct {
// Copies the object if it hasn't been modified since the specified time.
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
- // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
@@ -8457,8 +11653,8 @@ type CopyObjectInput struct {
CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
// The date and time at which the object is no longer cacheable.
@@ -8476,6 +11672,8 @@ type CopyObjectInput struct {
// Allows grantee to write the ACL for the applicable object.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+ // The key of the destination object.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -8489,31 +11687,33 @@ type CopyObjectInput struct {
// Specifies whether you want to apply a Legal Hold to the copied object.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // The object lock mode that you want to apply to the copied object.
+ // The Object Lock mode that you want to apply to the copied object.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
- // The date and time when you want the copied object's object lock to expire.
+ // The date and time when you want the copied object's Object Lock to expire.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the AWS KMS Encryption Context to use for object encryption. The
@@ -8523,12 +11723,14 @@ type CopyObjectInput struct {
// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
// requests for an object protected by AWS KMS will fail if not made via SSL
- // or using SigV4. Documentation on configuring any of the officially supported
- // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ // or using SigV4. For information about configuring using any of the officially
+ // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request
+ // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// The type of storage to use for the object. Defaults to 'STANDARD'.
@@ -8536,7 +11738,7 @@ type CopyObjectInput struct {
// The tag-set for the destination object. This value must be used in
// conjunction with the TaggingDirective. The tag-set must be encoded as URL
- // Query parameters
+ // Query parameters.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// Specifies whether the object tag-set are copied from the source object or
@@ -8827,11 +12029,27 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput
return s
}
+func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *CopyObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
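// A minimal sketch of a basic CopyObject call using the destination bucket/key and
// CopySource fields documented above. All names are placeholders; the key portion of
// CopySource must be URL-encoded if it contains special characters.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),          // placeholder
		Key:        aws.String("destination-object"),          // placeholder
		CopySource: aws.String("source-bucket/source-object"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new ETag:", aws.StringValue(out.CopyObjectResult.ETag))
}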
type CopyObjectOutput struct {
_ struct{} `type:"structure" payload:"CopyObjectResult"`
+ // Container for all response elements.
CopyObjectResult *CopyObjectResult `type:"structure"`
+ // Version of the copied object in the destination bucket.
CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
// If the object expiration is configured, the response includes this header.
@@ -8847,7 +12065,7 @@ type CopyObjectOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
@@ -8856,12 +12074,13 @@ type CopyObjectOutput struct {
// the encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// Version ID of the newly created copy.
@@ -8938,11 +12157,16 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
return s
}
+// Container for all response elements.
type CopyObjectResult struct {
_ struct{} `type:"structure"`
+ // Returns the ETag of the new object. The ETag reflects only changes to the
+ // contents of an object, not its metadata. The source and destination ETags
+ // are identical for a successfully copied object.
ETag *string `type:"string"`
+ // Returns the date that the object was last modified.
LastModified *time.Time `type:"timestamp"`
}
@@ -8968,6 +12192,7 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
return s
}
+// Container for all response elements.
type CopyPartResult struct {
_ struct{} `type:"structure"`
@@ -9000,11 +12225,12 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
return s
}
+// The configuration information for the bucket.
type CreateBucketConfiguration struct {
_ struct{} `type:"structure"`
- // Specifies the region where the bucket will be created. If you don't specify
- // a region, the bucket is created in US East (N. Virginia) Region (us-east-1).
+ // Specifies the Region where the bucket will be created. If you don't specify
+ // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
}
@@ -9030,9 +12256,12 @@ type CreateBucketInput struct {
// The canned ACL to apply to the bucket.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+ // The name of the bucket to create.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // The configuration information for the bucket.
CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Allows grantee the read, write, read ACP, and write ACP permissions on the
@@ -9051,8 +12280,7 @@ type CreateBucketInput struct {
// Allows grantee to write the ACL for the applicable bucket.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
- // Specifies whether you want Amazon S3 object lock to be enabled for the new
- // bucket.
+ // Specifies whether you want S3 Object Lock to be enabled for the new bucket.
ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"`
}
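// A minimal sketch of creating a bucket in a specific Region via the
// CreateBucketConfiguration documented above. The bucket name is a placeholder; omit
// LocationConstraint to create the bucket in us-east-1.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	out, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("my-unique-bucket-name"), // placeholder; bucket names are globally unique
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("us-west-2"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket location:", aws.StringValue(out.Location))
}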
@@ -9146,6 +12374,9 @@ func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketI
type CreateBucketOutput struct {
_ struct{} `type:"structure"`
+ // Specifies the Region where the bucket will be created. If you are creating
+ // a bucket in the US East (N. Virginia) Region (us-east-1), you do not need
+ // to specify the location.
Location *string `location:"header" locationName:"Location" type:"string"`
}
@@ -9171,6 +12402,8 @@ type CreateMultipartUploadInput struct {
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+ // The name of the bucket in which to initiate the multipart upload.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -9206,6 +12439,8 @@ type CreateMultipartUploadInput struct {
// Allows grantee to write the ACL for the applicable object.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+ // Object key for which the multipart upload is to be initiated.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -9215,31 +12450,33 @@ type CreateMultipartUploadInput struct {
// Specifies whether you want to apply a Legal Hold to the uploaded object.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // Specifies the object lock mode that you want to apply to the uploaded object.
+ // Specifies the Object Lock mode that you want to apply to the uploaded object.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
- // Specifies the date and time when you want the object lock to expire.
+ // Specifies the date and time when you want the Object Lock to expire.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the AWS KMS Encryption Context to use for object encryption. The
@@ -9247,20 +12484,22 @@ type CreateMultipartUploadInput struct {
// encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- // requests for an object protected by AWS KMS will fail if not made via SSL
- // or using SigV4. Documentation on configuring any of the officially supported
- // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
+ // object encryption. All GET and PUT requests for an object protected by AWS
+ // KMS will fail if not made via SSL or using SigV4. For information about configuring
+ // using any of the officially supported AWS SDKs and AWS CLI, see Specifying
+ // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
- // The tag-set for the object. The tag-set must be encoded as URL Query parameters
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
@@ -9477,17 +12716,47 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat
return s
}
+func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *CreateMultipartUploadInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
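// A minimal sketch of initiating a multipart upload with server-side encryption, as the
// documentation above describes. The bucket, key, and KMS key ID are placeholders; the
// returned UploadId is what subsequent UploadPart calls must carry.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("my-bucket"),                            // placeholder
		Key:                  aws.String("my-object"),                            // placeholder
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),          // "aws:kms"
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder CMK ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upload ID:", aws.StringValue(out.UploadId))
}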
type CreateMultipartUploadOutput struct {
_ struct{} `type:"structure"`
- // Date when multipart upload will become eligible for abort operation by lifecycle.
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object
+ // name in the request, the response includes this header. The header indicates
+ // when the initiated multipart upload becomes eligible for an abort operation.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ //
+ // The response also includes the x-amz-abort-rule-id header that provides the
+ // ID of the lifecycle configuration rule that defines this action.
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
- // Id of the lifecycle rule that makes a multipart upload eligible for abort
- // operation.
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
// Name of the bucket to which the multipart upload was initiated.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
Bucket *string `locationName:"Bucket" type:"string"`
// Object key for which the multipart upload was initiated.
@@ -9503,7 +12772,7 @@ type CreateMultipartUploadOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
@@ -9512,12 +12781,13 @@ type CreateMultipartUploadOutput struct {
// the encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// ID for the initiated multipart upload.
@@ -9607,7 +12877,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo
return s
}
-// The container element for specifying the default object lock retention settings
+// The container element for specifying the default Object Lock retention settings
// for new objects placed in the specified bucket.
type DefaultRetention struct {
_ struct{} `type:"structure"`
@@ -9615,7 +12885,7 @@ type DefaultRetention struct {
// The number of days that you want to specify for the default retention period.
Days *int64 `type:"integer"`
- // The default object lock retention mode you want to apply to new objects placed
+ // The default Object Lock retention mode you want to apply to new objects placed
// in the specified bucket.
Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
@@ -9651,9 +12921,12 @@ func (s *DefaultRetention) SetYears(v int64) *DefaultRetention {
return s
}
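// A minimal sketch of applying the default Object Lock retention settings documented
// above to a bucket. The bucket name, mode, and retention period are placeholders; the
// bucket must have been created with Object Lock enabled.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled), // "Enabled"
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}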
+// Container for the objects to delete.
type Delete struct {
_ struct{} `type:"structure"`
+ // The objects to delete.
+ //
// Objects is a required field
Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
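// A minimal sketch of the Delete container documented above, as used by DeleteObjects to
// remove several keys in one request. The bucket and keys are placeholders; Quiet
// suppresses per-key success entries in the response.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("reports/2020-01.csv")}, // placeholder keys
				{Key: aws.String("reports/2020-02.csv")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}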
@@ -9769,6 +13042,20 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA
return s
}
+func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketAnalyticsConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -9786,6 +13073,8 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
type DeleteBucketCorsInput struct {
_ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"`
+ // Specifies the bucket whose cors configuration is being deleted.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -9829,6 +13118,20 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketCorsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketCorsOutput struct {
_ struct{} `type:"structure"`
}
@@ -9892,6 +13195,20 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketEncryptionOutput struct {
_ struct{} `type:"structure"`
}
@@ -9909,6 +13226,8 @@ func (s DeleteBucketEncryptionOutput) GoString() string {
type DeleteBucketInput struct {
_ struct{} `locationName:"DeleteBucketRequest" type:"structure"`
+ // Specifies the bucket being deleted.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -9952,6 +13271,20 @@ func (s *DeleteBucketInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketInventoryConfigurationInput struct {
_ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"`
@@ -10014,7 +13347,21 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI
return s
}
-type DeleteBucketInventoryConfigurationOutput struct {
+func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+type DeleteBucketInventoryConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -10031,6 +13378,8 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
type DeleteBucketLifecycleInput struct {
_ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"`
+ // The bucket name of the lifecycle to delete.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -10074,6 +13423,20 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
}
@@ -10150,6 +13513,20 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet
return s
}
+func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketMetricsConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -10181,6 +13558,8 @@ func (s DeleteBucketOutput) GoString() string {
type DeleteBucketPolicyInput struct {
_ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -10224,6 +13603,20 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketPolicyInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketPolicyOutput struct {
_ struct{} `type:"structure"`
}
@@ -10243,9 +13636,6 @@ type DeleteBucketReplicationInput struct {
// The bucket name.
//
- // It can take a while to propagate the deletion of a replication configuration
- // to all Amazon S3 systems.
- //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -10289,6 +13679,20 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketReplicationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketReplicationOutput struct {
_ struct{} `type:"structure"`
}
@@ -10306,6 +13710,8 @@ func (s DeleteBucketReplicationOutput) GoString() string {
type DeleteBucketTaggingInput struct {
_ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"`
+ // The bucket that has the tag set to be removed.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -10349,6 +13755,20 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketTaggingOutput struct {
_ struct{} `type:"structure"`
}
@@ -10366,6 +13786,8 @@ func (s DeleteBucketTaggingOutput) GoString() string {
type DeleteBucketWebsiteInput struct {
_ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"`
+ // The bucket name for which you want to remove the website configuration.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -10409,6 +13831,20 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
}
@@ -10423,6 +13859,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string {
return s.String()
}
+// Information about the delete marker.
type DeleteMarkerEntry struct {
_ struct{} `type:"structure"`
@@ -10436,6 +13873,7 @@ type DeleteMarkerEntry struct {
// Date and time the object was last modified.
LastModified *time.Time `type:"timestamp"`
+ // The account that created the delete marker.>
Owner *Owner `type:"structure"`
// Version ID of an object.
@@ -10482,11 +13920,21 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
return s
}
-// Specifies whether Amazon S3 should replicate delete makers.
+// Specifies whether Amazon S3 replicates the delete markers. If you specify
+// a Filter, you must specify this element. However, in the latest version of
+// replication configuration (when Filter is specified), Amazon S3 doesn't replicate
+// delete markers. Therefore, the DeleteMarkerReplication element can contain
+// only Disabled. For an example configuration, see Basic Rule
+// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, Amazon
+// S3 handled replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
type DeleteMarkerReplication struct {
_ struct{} `type:"structure"`
- // The status of the delete marker replication.
+ // Indicates whether to replicate delete markers.
//
// In the current implementation, Amazon S3 doesn't replicate the delete markers.
// The status must be Disabled.
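To make the V1/V2 distinction concrete, a minimal sketch of a V2-style rule (Filter present), where DeleteMarkerReplication can only be Disabled; the prefix and bucket ARN are placeholders, not taken from this diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// V2-style rule: because Filter is specified, delete markers are not
	// replicated and DeleteMarkerReplication may only be Disabled.
	rule := &s3.ReplicationRule{
		Status:   aws.String(s3.ReplicationRuleStatusEnabled),
		Priority: aws.Int64(1),
		Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
		DeleteMarkerReplication: &s3.DeleteMarkerReplication{
			Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
		},
		Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
	}
	fmt.Println(rule)
}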
@@ -10512,24 +13960,38 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
type DeleteObjectInput struct {
_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+ // The bucket name of the bucket containing the object.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions
+ // Indicates whether S3 Object Lock should bypass Governance-mode restrictions
// to process this operation.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+ // Key name of the object to delete.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// The concatenation of the authentication device's serial number, a space,
- // and the value that is displayed on your authentication device.
+ // and the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// VersionId used to reference a specific version of the object.
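A minimal sketch that ties these fields together: deleting one object version through an access point, with the x-amz-mfa value formed as the device serial number, a space, and the current token. The ARN, key, version ID, and MFA values below are placeholders, not taken from this diff.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Bucket carries an access point ARN; MFA is "<device serial> <token>".
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
		Key:       aws.String("reports/2020-01.csv"),
		VersionId: aws.String("3HL4kqtJlcpXrof3vjVBH40Nrjfkd"),
		MFA:       aws.String("arn:aws:iam::123456789012:mfa/alice 123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
}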
@@ -10611,6 +14073,20 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
return s
}
+func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteObjectOutput struct {
_ struct{} `type:"structure"`
@@ -10658,9 +14134,20 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
type DeleteObjectTaggingInput struct {
_ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`
+ // The bucket name containing the objects from which to remove the tags.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Name of the object key.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -10725,6 +14212,20 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn
return s
}
+func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteObjectTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteObjectTaggingOutput struct {
_ struct{} `type:"structure"`
@@ -10751,25 +14252,39 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO
type DeleteObjectsInput struct {
_ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`
+ // The bucket name containing the objects to delete.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Specifies whether you want to delete this object even if it has a Governance-type
- // object lock in place. You must have sufficient permissions to perform this
+ // Object Lock in place. You must have sufficient permissions to perform this
// operation.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+ // Container for the request.
+ //
// Delete is a required field
Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// The concatenation of the authentication device's serial number, a space,
- // and the value that is displayed on your authentication device.
+ // and the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
}
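A minimal sketch of a batch delete built from the fields above; it then inspects the Deleted and Errors containers documented in the DeleteObjectsOutput hunk below. The bucket and keys are placeholders, not taken from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("a.txt")},
				{Key: aws.String("b.txt")},
			},
			// false: the response lists every deleted object, not only failures.
			Quiet: aws.Bool(false),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.Deleted {
		fmt.Println("deleted:", aws.StringValue(d.Key), aws.StringValue(d.VersionId))
	}
	for _, e := range out.Errors {
		fmt.Println("failed:", aws.StringValue(e.Key), aws.StringValue(e.Code), aws.StringValue(e.Message))
	}
}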
@@ -10844,11 +14359,29 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
return s
}
+func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteObjectsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeleteObjectsOutput struct {
_ struct{} `type:"structure"`
+ // Container element for a successful delete. It identifies the object that
+ // was successfully deleted.
Deleted []*DeletedObject `type:"list" flattened:"true"`
+ // Container for a failed delete operation that describes the object that Amazon
+ // S3 attempted to delete and the error it encountered.
Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
// If present, indicates that the requester was successfully charged for the
@@ -10932,6 +14465,20 @@ func (s *DeletePublicAccessBlockInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type DeletePublicAccessBlockOutput struct {
_ struct{} `type:"structure"`
}
@@ -10946,15 +14493,24 @@ func (s DeletePublicAccessBlockOutput) GoString() string {
return s.String()
}
+// Information about the deleted object.
type DeletedObject struct {
_ struct{} `type:"structure"`
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker. In a simple DELETE, this header indicates
+ // whether (true) or not (false) a delete marker was created.
DeleteMarker *bool `type:"boolean"`
+ // The version ID of the delete marker created as a result of the DELETE operation.
+ // If you delete a specific object version, the value returned by this header
+ // is the version ID of the object version deleted.
DeleteMarkerVersionId *string `type:"string"`
+ // The name of the deleted object.
Key *string `min:"1" type:"string"`
+ // The version ID of the deleted object.
VersionId *string `type:"string"`
}
@@ -10993,7 +14549,7 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
}
// Specifies information about where to publish analysis or configuration results
-// for an Amazon S3 bucket.
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
type Destination struct {
_ struct{} `type:"structure"`
@@ -11008,17 +14564,12 @@ type Destination struct {
// direct Amazon S3 to change replica ownership to the AWS account that owns
// the destination bucket by specifying the AccessControlTranslation property,
// this is the account ID of the destination bucket owner. For more information,
- // see Cross-Region Replication Additional Configuration: Change Replica Owner
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in
- // the Amazon Simple Storage Service Developer Guide.
+ // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
+ // in the Amazon Simple Storage Service Developer Guide.
Account *string `type:"string"`
// The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
- // store replicas of the object identified by the rule.
- //
- // A replication configuration can replicate objects to only one destination
- // bucket. If there are multiple rules in your replication configuration, all
- // rules must specify the same destination bucket.
+ // store the results.
//
// Bucket is a required field
Bucket *string `type:"string" required:"true"`
@@ -11027,9 +14578,19 @@ type Destination struct {
// is specified, you must specify this element.
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
- // The storage class to use when replicating objects, such as standard or reduced
- // redundancy. By default, Amazon S3 uses the storage class of the source object
- // to create the object replica.
+ // A container specifying replication metrics-related settings enabling metrics
+ // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified
+ // together with a ReplicationTime block.
+ Metrics *Metrics `type:"structure"`
+
+ // A container specifying S3 Replication Time Control (S3 RTC), including whether
+ // S3 RTC is enabled and the time when all objects and operations on objects
+ // must be replicated. Must be specified together with a Metrics block.
+ ReplicationTime *ReplicationTime `type:"structure"`
+
+ // The storage class to use when replicating objects, such as S3 Standard or
+ // reduced redundancy. By default, Amazon S3 uses the storage class of the source
+ // object to create the object replica.
//
// For valid values, see the StorageClass element of the PUT Bucket replication
// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
@@ -11058,6 +14619,16 @@ func (s *Destination) Validate() error {
invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams))
}
}
+ if s.Metrics != nil {
+ if err := s.Metrics.Validate(); err != nil {
+ invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ReplicationTime != nil {
+ if err := s.ReplicationTime.Validate(); err != nil {
+ invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -11096,19 +14667,30 @@ func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *De
return s
}
+// SetMetrics sets the Metrics field's value.
+func (s *Destination) SetMetrics(v *Metrics) *Destination {
+ s.Metrics = v
+ return s
+}
+
+// SetReplicationTime sets the ReplicationTime field's value.
+func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination {
+ s.ReplicationTime = v
+ return s
+}
+
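A minimal sketch of the Metrics and ReplicationTime blocks added above, specified together as their doc comments require. The 15-minute threshold reflects the S3 RTC value described in the S3 documentation, and the destination bucket ARN is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	dest := &s3.Destination{
		Bucket: aws.String("arn:aws:s3:::destination-bucket"),
		// S3 RTC: ReplicationTime and Metrics must be specified together.
		ReplicationTime: &s3.ReplicationTime{
			Status: aws.String(s3.ReplicationTimeStatusEnabled),
			Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
		Metrics: &s3.Metrics{
			Status:         aws.String(s3.MetricsStatusEnabled),
			EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
	}
	fmt.Println(dest)
}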
// SetStorageClass sets the StorageClass field's value.
func (s *Destination) SetStorageClass(v string) *Destination {
s.StorageClass = &v
return s
}
-// Describes the server-side encryption that will be applied to the restore
-// results.
+// Contains the type of server-side encryption used.
type Encryption struct {
_ struct{} `type:"structure"`
// The server-side encryption algorithm used when storing job results in Amazon
- // S3 (e.g., AES256, aws:kms).
+ // S3 (for example, AES256, aws:kms).
//
// EncryptionType is a required field
EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"`
@@ -11117,8 +14699,11 @@ type Encryption struct {
// the encryption context for the restore results.
KMSContext *string `type:"string"`
- // If the encryption type is aws:kms, this optional value specifies the AWS
- // KMS key ID to use for encryption of job results.
+ // If the encryption type is aws:kms, this optional value specifies the ID of
+ // the symmetric customer managed AWS KMS CMK to use for encryption of job results.
+ // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric
+ // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the AWS Key Management Service Developer Guide.
KMSKeyId *string `type:"string" sensitive:"true"`
}
@@ -11168,8 +14753,12 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption {
type EncryptionConfiguration struct {
_ struct{} `type:"structure"`
- // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket.
- // Amazon S3 uses this key to encrypt replica objects.
+ // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
+ // master key (CMK) stored in AWS Key Management Service (KMS) for the destination
+ // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
+ // supports symmetric customer managed CMKs. For more information, see Using
+ // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the AWS Key Management Service Developer Guide.
ReplicaKmsKeyID *string `type:"string"`
}
@@ -11189,6 +14778,9 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig
return s
}
+// A message that indicates the request is complete and no more messages will
+// be sent. You should not assume that the request is complete until the client
+// receives an EndEvent.
type EndEvent struct {
_ struct{} `locationName:"EndEvent" type:"structure"`
}
@@ -11215,15 +14807,382 @@ func (s *EndEvent) UnmarshalEvent(
return nil
}
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ return msg, err
+}
+
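The EndEvent note above matters most when draining an event stream: the response is complete only once EndEvent arrives. A minimal sketch around an S3 Select request; the bucket, key, and SQL expression are placeholders, not taken from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:              aws.String("my-bucket"),
		Key:                 aws.String("data.csv"),
		Expression:          aws.String("SELECT * FROM S3Object s"),
		ExpressionType:      aws.String(s3.ExpressionTypeSql),
		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.EventStream.Close()

	complete := false
	for event := range resp.EventStream.Events() {
		switch e := event.(type) {
		case *s3.RecordsEvent:
			fmt.Print(string(e.Payload))
		case *s3.EndEvent:
			complete = true // only now is the response known to be complete
		}
	}
	if err := resp.EventStream.Err(); err != nil || !complete {
		log.Fatalf("incomplete response: %v", err)
	}
}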
+// Container for all error elements.
type Error struct {
_ struct{} `type:"structure"`
+ // The error code is a string that uniquely identifies an error condition. It
+ // is meant to be read and understood by programs that detect and handle errors
+ // by type.
+ //
+ // Amazon S3 error codes
+ //
+ // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403
+ // Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: AccountProblem Description: There is a problem with your AWS account
+ // that prevents the operation from completing successfully. Contact AWS
+ // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource
+ // has been disabled. Contact AWS Support for further assistance. HTTP Status
+ // Code: 403 Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: AmbiguousGrantByEmailAddress Description: The email address you
+ // provided is associated with more than one account. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: AuthorizationHeaderMalformed Description: The authorization header
+ // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status
+ // Code: N/A
+ //
+ // * Code: BadDigest Description: The Content-MD5 you specified did not match
+ // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: BucketAlreadyExists Description: The requested bucket name is
+ // not available. The bucket namespace is shared by all users of the system.
+ // Please select a different name and try again. HTTP Status Code: 409 Conflict
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create
+ // already exists, and you own it. Amazon S3 returns this error in all AWS
+ // Regions except in the North Virginia Region. For legacy compatibility,
+ // if you re-create an existing bucket that you already own in the North
+ // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ // control lists (ACLs). Code: 409 Conflict (in all Regions except the North
+ // Virginia Region) SOAP Fault Code Prefix: Client
+ //
+ // * Code: BucketNotEmpty Description: The bucket you tried to delete is
+ // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client
+ //
+ // * Code: CredentialsNotSupported Description: This request does not support
+ // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: CrossLocationLoggingProhibited Description: Cross-location logging
+ // not allowed. Buckets in one geographic location cannot log information
+ // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: EntityTooSmall Description: Your proposed upload is smaller than
+ // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum
+ // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: ExpiredToken Description: The provided token has expired. HTTP
+ // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: IllegalVersioningConfigurationException Description: Indicates
+ // that the versioning configuration specified in the request is invalid.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: IncompleteBody Description: You did not provide the number of
+ // bytes specified by the Content-Length HTTP header HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires
+ // exactly one file upload per request. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum
+ // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InternalError Description: We encountered an internal error. Please
+ // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code
+ // Prefix: Server
+ //
+ // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided
+ // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: InvalidAddressingHeader Description: You must specify the Anonymous
+ // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code:
+ // 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidBucketName Description: The specified bucket is not valid.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidBucketState Description: The request is not valid with
+ // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: InvalidDigest Description: The Content-MD5 you specified is not
+ // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidEncryptionAlgorithmError Description: The encryption request
+ // you specified is not valid. The valid value is AES256. HTTP Status Code:
+ // 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidLocationConstraint Description: The specified location
+ // constraint is not valid. For more information about Regions, see How to
+ // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidObjectState Description: The operation is not valid for
+ // the current state of the object. HTTP Status Code: 403 Forbidden SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: InvalidPart Description: One or more of the specified parts could
+ // not be found. The part might not have been uploaded, or the specified
+ // entity tag might not have matched the part's entity tag. HTTP Status Code:
+ // 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPartOrder Description: The list of parts was not in ascending
+ // order. Parts list must be specified in order by part number. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPayer Description: All access to this object has been disabled.
+ // Please contact AWS Support for further assistance. HTTP Status Code: 403
+ // Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPolicyDocument Description: The content of the form does
+ // not meet the conditions specified in the policy document. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidRange Description: The requested range cannot be satisfied.
+ // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP
+ // Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: SOAP requests must be made over an
+ // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is
+ // not supported for buckets with non-DNS compliant names. HTTP Status Code:
+ // 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is
+ // not supported for buckets with periods (.) in their names. HTTP Status
+ // Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint
+ // only supports virtual style requests. HTTP Status Code: 400 Bad Request
+ // Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not
+ // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled
+ // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is
+ // not supported on this bucket. Contact AWS Support for more information.
+ // HTTP Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot
+ // be enabled on this bucket. Contact AWS Support for more information. HTTP
+ // Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidSecurity Description: The provided security credentials
+ // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidStorageClass Description: The storage class you specified
+ // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidTargetBucketForLogging Description: The target bucket for
+ // logging does not exist, is not owned by you, or does not have the appropriate
+ // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: InvalidToken Description: The provided token is malformed or otherwise
+ // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP
+ // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: KeyTooLongError Description: Your key is too long. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedACLError Description: The XML you provided was not well-formed
+ // or did not validate against our published schema. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedPOSTRequest Description: The body of your POST request
+ // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedXML Description: This happens when the user sends malformed
+ // XML (XML that doesn't conform to the published XSD) for the configuration.
+ // The error message is, "The XML you provided was not well-formed or did
+ // not validate against our published schema." HTTP Status Code: 400 Bad
+ // Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MaxMessageLengthExceeded Description: Your request was too big.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MaxPostPreDataLengthExceededError Description: Your POST request
+ // fields preceding the upload file were too large. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MetadataTooLarge Description: Your metadata headers exceed the
+ // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: MethodNotAllowed Description: The specified method is not allowed
+ // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: MissingAttachment Description: A SOAP attachment was expected,
+ // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client
+ //
+ // * Code: MissingContentLength Description: You must provide the Content-Length
+ // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: MissingRequestBodyError Description: This happens when the user
+ // sends an empty XML document as a request. The error message is, "Request
+ // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing
+ // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: MissingSecurityHeader Description: Your request is missing a required
+ // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoLoggingStatusForKey Description: There is no such thing as a
+ // logging status subresource for a key. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchBucket Description: The specified bucket does not exist.
+ // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchBucketPolicy Description: The specified bucket does not
+ // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: NoSuchKey Description: The specified key does not exist. HTTP
+ // Status Code: 404 Not Found SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration
+ // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: NoSuchUpload Description: The specified multipart upload does
+ // not exist. The upload ID might be invalid, or the multipart upload might
+ // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: NoSuchVersion Description: Indicates that the version ID specified
+ // in the request does not match an existing version. HTTP Status Code: 404
+ // Not Found SOAP Fault Code Prefix: Client
+ //
+ // * Code: NotImplemented Description: A header you provided implies functionality
+ // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault
+ // Code Prefix: Server
+ //
+ // * Code: NotSignedUp Description: Your account is not signed up for the
+ // Amazon S3 service. You must sign up before you can use Amazon S3. You
+ // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status
+ // Code: 403 Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: OperationAborted Description: A conflicting conditional operation
+ // is currently in progress against this resource. Try again. HTTP Status
+ // Code: 409 Conflict SOAP Fault Code Prefix: Client
+ //
+ // * Code: PermanentRedirect Description: The bucket you are attempting to
+ // access must be addressed using the specified endpoint. Send all future
+ // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: PreconditionFailed Description: At least one of the preconditions
+ // you specified did not hold. HTTP Status Code: 412 Precondition Failed
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307
+ // Moved Temporarily SOAP Fault Code Prefix: Client
+ //
+ // * Code: RestoreAlreadyInProgress Description: Object restore is already
+ // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be
+ // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTimeout Description: Your socket connection to the server
+ // was not read from or written to within the timeout period. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTimeTooSkewed Description: The difference between the request
+ // time and the server's time is too large. HTTP Status Code: 403 Forbidden
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTorrentOfBucketError Description: Requesting the torrent
+ // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: SignatureDoesNotMatch Description: The request signature we calculated
+ // does not match the signature you provided. Check your AWS secret access
+ // key and signing method. For more information, see REST Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
+ // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html)
+ // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP
+ // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server
+ //
+ // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code:
+ // 503 Slow Down SOAP Fault Code Prefix: Server
+ //
+ // * Code: TemporaryRedirect Description: You are being redirected to the
+ // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: TokenRefreshRequired Description: The provided token must be refreshed.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: TooManyBuckets Description: You have attempted to create more
+ // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: UnexpectedContent Description: This request does not support content.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: UnresolvableGrantByEmailAddress Description: The email address
+ // you provided does not match any account on record. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain
+ // the specified field name. If it is specified, check the order of the fields.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
Code *string `type:"string"`
+ // The error key.
Key *string `min:"1" type:"string"`
+ // The error message contains a generic description of the error condition in
+ // English. It is intended for a human audience. Simple programs display the
+ // message directly to the end user if they encounter an error condition they
+ // don't know how or don't care to handle. Sophisticated programs with more
+ // exhaustive error handling and proper internationalization are more likely
+ // to ignore the error message.
Message *string `type:"string"`
+ // The version ID of the error.
VersionId *string `type:"string"`
}
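The Code values listed above are what S3 calls surface through awserr.Error. A minimal sketch of switching on two of them; the bucket and key are placeholders, and only the not-found codes are handled.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("missing.txt"),
	})
	if err == nil {
		defer out.Body.Close()
		fmt.Println("size:", aws.Int64Value(out.ContentLength))
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey:
			fmt.Println("not found:", aerr.Code(), aerr.Message())
			return
		}
	}
	log.Fatal(err)
}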
@@ -11261,6 +15220,7 @@ func (s *Error) SetVersionId(v string) *Error {
return s
}
+// The error information.
type ErrorDocument struct {
_ struct{} `type:"structure"`
@@ -11276,18 +15236,57 @@ func (s ErrorDocument) String() string {
}
// GoString returns the string representation
-func (s ErrorDocument) GoString() string {
+func (s ErrorDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+ s.Key = &v
+ return s
+}
+
+// Optional configuration to replicate existing source bucket objects. For more
+// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
+// in the Amazon S3 Developer Guide.
+type ExistingObjectReplication struct {
+ _ struct{} `type:"structure"`
+
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"`
+}
+
+// String returns the string representation
+func (s ExistingObjectReplication) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExistingObjectReplication) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
-func (s *ErrorDocument) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.Key != nil && len(*s.Key) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+func (s *ExistingObjectReplication) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
}
if invalidParams.Len() > 0 {
@@ -11296,9 +15295,9 @@ func (s *ErrorDocument) Validate() error {
return nil
}
-// SetKey sets the Key field's value.
-func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
- s.Key = &v
+// SetStatus sets the Status field's value.
+func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication {
+ s.Status = &v
return s
}
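A minimal sketch of attaching the new ExistingObjectReplication element to a replication rule; the empty rule here is a stand-in for one built as in the earlier DeleteMarkerReplication sketch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Opt an assumed, pre-built rule into replicating existing objects.
	rule := &s3.ReplicationRule{}
	rule.ExistingObjectReplication = &s3.ExistingObjectReplication{
		Status: aws.String(s3.ExistingObjectReplicationStatusEnabled),
	}
	fmt.Println(rule.ExistingObjectReplication)
}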
@@ -11388,6 +15387,20 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketAccelerateConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -11414,6 +15427,8 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA
type GetBucketAclInput struct {
_ struct{} `locationName:"GetBucketAclRequest" type:"structure"`
+ // Specifies the S3 bucket whose ACL is being requested.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -11457,12 +15472,27 @@ func (s *GetBucketAclInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketAclInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketAclOutput struct {
_ struct{} `type:"structure"`
// A list of grants.
Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+ // Container for the bucket owner's display name and ID.
Owner *Owner `type:"structure"`
}
@@ -11550,6 +15580,20 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti
return s
}
+func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketAnalyticsConfigurationOutput struct {
_ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
@@ -11576,6 +15620,8 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana
type GetBucketCorsInput struct {
_ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`
+ // The bucket name for which to get the cors configuration.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -11619,9 +15665,25 @@ func (s *GetBucketCorsInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketCorsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketCorsOutput struct {
_ struct{} `type:"structure"`
+ // A set of origins and methods (cross-origin access that you want to allow).
+ // You can add up to 100 rules to the configuration.
CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
}
@@ -11690,6 +15752,20 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketEncryptionInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketEncryptionOutput struct {
_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
@@ -11775,6 +15851,20 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento
return s
}
+func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketInventoryConfigurationOutput struct {
_ struct{} `type:"structure" payload:"InventoryConfiguration"`
@@ -11801,6 +15891,8 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv
type GetBucketLifecycleConfigurationInput struct {
_ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
+ // The name of the bucket for which to get the lifecycle information.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -11844,9 +15936,24 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketLifecycleConfigurationOutput struct {
_ struct{} `type:"structure"`
+ // Container for a lifecycle rule.
Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
}
@@ -11869,6 +15976,8 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge
type GetBucketLifecycleInput struct {
_ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`
+ // The name of the bucket for which to get the lifecycle information.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -11912,9 +16021,24 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketLifecycleInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
+ // Container for a lifecycle rule.
Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
}
@@ -11937,6 +16061,8 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput
type GetBucketLocationInput struct {
_ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
+ // The name of the bucket for which to get the location.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -11980,9 +16106,26 @@ func (s *GetBucketLocationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketLocationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketLocationOutput struct {
_ struct{} `type:"structure"`
+ // Specifies the Region where the bucket resides. For a list of all the Amazon
+ // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region).
+ // Buckets in Region us-east-1 have a LocationConstraint of null.
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
}
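Because a us-east-1 bucket reports a null LocationConstraint, callers typically normalize the returned value. A minimal sketch using the SDK's NormalizeBucketLocation helper; the bucket name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("my-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// An empty LocationConstraint means us-east-1; NormalizeBucketLocation maps it.
	fmt.Println(s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)))
}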
@@ -12005,6 +16148,8 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca
type GetBucketLoggingInput struct {
_ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
+ // The bucket name for which to get the logging information.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12048,6 +16193,20 @@ func (s *GetBucketLoggingInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketLoggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketLoggingOutput struct {
_ struct{} `type:"structure"`
@@ -12136,6 +16295,20 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo
return s
}
+func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketMetricsConfigurationOutput struct {
_ struct{} `type:"structure" payload:"MetricsConfiguration"`
@@ -12162,7 +16335,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics
type GetBucketNotificationConfigurationRequest struct {
_ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`
- // Name of the bucket to get the notification configuration for.
+ // Name of the bucket for which to get the notification configuration.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12207,9 +16380,25 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketPolicyInput struct {
_ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`
+ // The bucket name for which to get the bucket policy.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12253,6 +16442,20 @@ func (s *GetBucketPolicyInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketPolicyInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketPolicyOutput struct {
_ struct{} `type:"structure" payload:"Policy"`
@@ -12324,6 +16527,20 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketPolicyStatusOutput struct {
_ struct{} `type:"structure" payload:"PolicyStatus"`
@@ -12350,6 +16567,8 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke
type GetBucketReplicationInput struct {
_ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`
+ // The bucket name for which to get the replication information.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12393,6 +16612,20 @@ func (s *GetBucketReplicationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketReplicationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketReplicationOutput struct {
_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
@@ -12420,6 +16653,8 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC
type GetBucketRequestPaymentInput struct {
_ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`
+	// The name of the bucket for which to get the payment request configuration.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12463,6 +16698,20 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketRequestPaymentOutput struct {
_ struct{} `type:"structure"`
@@ -12489,6 +16738,8 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym
type GetBucketTaggingInput struct {
_ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"`
+ // The name of the bucket for which to get the tagging information.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12532,9 +16783,25 @@ func (s *GetBucketTaggingInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketTaggingOutput struct {
_ struct{} `type:"structure"`
+ // Contains the tag set.
+ //
// TagSet is a required field
TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
}
@@ -12558,6 +16825,8 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
type GetBucketVersioningInput struct {
_ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"`
+ // The name of the bucket for which to get the versioning information.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12601,6 +16870,20 @@ func (s *GetBucketVersioningInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketVersioningInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketVersioningOutput struct {
_ struct{} `type:"structure"`
@@ -12638,6 +16921,8 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp
type GetBucketWebsiteInput struct {
_ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"`
+ // The bucket name for which to get the website configuration.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -12681,17 +16966,34 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketWebsiteInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
+ // The object key name of the website error document to use for 4XX class errors.
ErrorDocument *ErrorDocument `type:"structure"`
+ // The name of the index document for the website (for example index.html).
IndexDocument *IndexDocument `type:"structure"`
// Specifies the redirect behavior of all requests to a website endpoint of
// an Amazon S3 bucket.
RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+ // Rules that define when a redirect is applied and the redirect behavior.
RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
}
@@ -12732,16 +17034,28 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb
type GetObjectAclInput struct {
_ struct{} `locationName:"GetObjectAclRequest" type:"structure"`
+ // The bucket name that contains the object for which to get the ACL information.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // The key of the object for which to get the ACL information.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// VersionId used to reference a specific version of the object.
@@ -12811,12 +17125,27 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
return s
}
+func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectAclInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetObjectAclOutput struct {
_ struct{} `type:"structure"`
// A list of grants.
Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+ // Container for the bucket owner's display name and ID.
Owner *Owner `type:"structure"`
// If present, indicates that the requester was successfully charged for the
@@ -12855,6 +17184,15 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
type GetObjectInput struct {
_ struct{} `locationName:"GetObjectRequest" type:"structure"`
+ // The bucket name containing the object.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12874,6 +17212,8 @@ type GetObjectInput struct {
// otherwise return a 412 (precondition failed).
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+ // Key of the object to get.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -12883,13 +17223,17 @@ type GetObjectInput struct {
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
// Downloads the specified range bytes of an object. For more information about
- // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
Range *string `location:"header" locationName:"Range" type:"string"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Sets the Cache-Control header of the response.
@@ -12910,19 +17254,20 @@ type GetObjectInput struct {
// Sets the Expires header of the response.
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// VersionId used to reference a specific version of the object.
@@ -13089,10 +17434,32 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
return s
}
+func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
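// A minimal sketch (not generated SDK code) of a GetObject call that exercises the
// fields documented above: an access point ARN in place of the bucket name, a single
// byte Range (multiple ranges per GET are not supported), and RequestPayer for a
// Requester Pays bucket. The region, ARN, and key values are placeholders.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:       aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:          aws.String("reports/2020/January.csv"),
		Range:        aws.String("bytes=0-1023"),           // first KiB only
		RequestPayer: aws.String(s3.RequestPayerRequester), // requester accepts the charges
	})
	if err != nil {
		fmt.Println("GetObject failed:", err)
		return
	}
	defer out.Body.Close()

	body, _ := ioutil.ReadAll(out.Body)
	fmt.Printf("read %d bytes, ETag=%s\n", len(body), aws.StringValue(out.ETag))
}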
type GetObjectLegalHoldInput struct {
_ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"`
- // The bucket containing the object whose Legal Hold status you want to retrieve.
+ // The bucket name containing the object whose Legal Hold status you want to
+ // retrieve.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13102,10 +17469,11 @@ type GetObjectLegalHoldInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The version ID of the object whose Legal Hold status you want to retrieve.
@@ -13175,6 +17543,20 @@ func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInpu
return s
}
+func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectLegalHoldInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetObjectLegalHoldOutput struct {
_ struct{} `type:"structure" payload:"LegalHold"`
@@ -13201,7 +17583,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje
type GetObjectLockConfigurationInput struct {
_ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"`
- // The bucket whose object lock configuration you want to retrieve.
+ // The bucket whose Object Lock configuration you want to retrieve.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13246,10 +17628,24 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetObjectLockConfigurationOutput struct {
_ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
- // The specified bucket's object lock configuration.
+ // The specified bucket's Object Lock configuration.
ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"`
}
@@ -13272,6 +17668,7 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL
type GetObjectOutput struct {
_ struct{} `type:"structure" payload:"Body"`
+ // Indicates that a range of bytes was specified.
AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
// Object data.
@@ -13305,11 +17702,11 @@ type GetObjectOutput struct {
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
// An ETag is an opaque identifier assigned by a web server to a specific version
- // of a resource found at a URL
+ // of a resource found at a URL.
ETag *string `location:"header" locationName:"ETag" type:"string"`
// If the object expiration is configured (see PUT Bucket lifecycle), the response
- // includes this header. It includes the expiry-date and rule-id key value pairs
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
// providing object expiration information. The value of the rule-id is URL
// encoded.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
@@ -13321,6 +17718,10 @@ type GetObjectOutput struct {
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
// A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized format:
+ // the first letter and any letter following a hyphen will be capitalized, and the rest left as lowercase.
+ // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
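// A short sketch of how the canonicalization described above surfaces to callers:
// an object uploaded with the header x-amz-meta-content-owner comes back under the
// map key "Content-Owner" by default; the aws.Config.LowerCaseHeaderMaps option
// mentioned above switches the keys to lowercase instead. Bucket and key names are
// placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-object"),
	})
	if err != nil {
		fmt.Println("GetObject failed:", err)
		return
	}
	defer out.Body.Close()

	// x-amz-meta-content-owner  ->  Metadata["Content-Owner"]
	fmt.Println("owner:", aws.StringValue(out.Metadata["Content-Owner"]))
}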
// This is set to the number of metadata entries not returned in x-amz-meta
@@ -13333,15 +17734,17 @@ type GetObjectOutput struct {
// returned if you have permission to view an object's legal hold status.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // The object lock mode currently in place for this object.
+ // The Object Lock mode currently in place for this object.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
- // The date and time when this object's object lock will expire.
+ // The date and time when this object's Object Lock will expire.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// The count of parts this object has.
PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+ // Amazon S3 can return this if your request involves a bucket that is either
+ // a source or destination in a replication rule.
ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
// If present, indicates that the requester was successfully charged for the
@@ -13358,18 +17761,21 @@ type GetObjectOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+ // Provides storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The number of tags, if any, on the object.
@@ -13583,7 +17989,15 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput
type GetObjectRetentionInput struct {
_ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"`
- // The bucket containing the object whose retention settings you want to retrieve.
+ // The bucket name containing the object whose retention settings you want to
+ // retrieve.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13593,10 +18007,11 @@ type GetObjectRetentionInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The version ID for the object whose retention settings you want to retrieve.
@@ -13666,6 +18081,20 @@ func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInpu
return s
}
+func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectRetentionInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetObjectRetentionOutput struct {
_ struct{} `type:"structure" payload:"Retention"`
@@ -13692,12 +18121,24 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje
type GetObjectTaggingInput struct {
_ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"`
+ // The bucket name containing the object for which to get the tagging information.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Object key for which to get the tagging information.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+ // The versionId of the object for which to get the tagging information.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -13758,12 +18199,29 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
return s
}
+func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetObjectTaggingOutput struct {
_ struct{} `type:"structure"`
+ // Contains the tag set.
+ //
// TagSet is a required field
TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+ // The versionId of the object for which you got the tagging information.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
@@ -13792,16 +18250,22 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput
type GetObjectTorrentInput struct {
_ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"`
+ // The name of the bucket containing the object for which to get the torrent
+ // files.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // The object key for which to get the information.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
}
@@ -13862,9 +18326,24 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput
return s
}
+func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetObjectTorrentInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetObjectTorrentOutput struct {
_ struct{} `type:"structure" payload:"Body"`
+ // A Bencoded dictionary as defined by the BitTorrent specification.
Body io.ReadCloser `type:"blob"`
// If present, indicates that the requester was successfully charged for the
@@ -13943,6 +18422,20 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetPublicAccessBlockInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type GetPublicAccessBlockOutput struct {
_ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
@@ -13967,10 +18460,11 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public
return s
}
+// Container for S3 Glacier job parameters.
type GlacierJobParameters struct {
_ struct{} `type:"structure"`
- // Glacier retrieval tier at which the restore will be processed.
+ // S3 Glacier retrieval tier at which the restore will be processed.
//
// Tier is a required field
Tier *string `type:"string" required:"true" enum:"Tier"`
@@ -14005,9 +18499,11 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
return s
}
+// Container for grant information.
type Grant struct {
_ struct{} `type:"structure"`
+ // The person being granted permissions.
Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
// Specifies the permission given to the grantee.
@@ -14051,6 +18547,7 @@ func (s *Grant) SetPermission(v string) *Grant {
return s
}
+// Container for the person being granted permissions.
type Grantee struct {
_ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
@@ -14058,6 +18555,29 @@ type Grantee struct {
DisplayName *string `type:"string"`
// Email address of the grantee.
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // AWS Regions:
+ //
+ // * US East (N. Virginia)
+ //
+ // * US West (N. California)
+ //
+ // * US West (Oregon)
+ //
+ // * Asia Pacific (Singapore)
+ //
+ // * Asia Pacific (Sydney)
+ //
+ // * Asia Pacific (Tokyo)
+ //
+ // * Europe (Ireland)
+ //
+ // * South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see Regions
+ // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the AWS General Reference.
EmailAddress *string `type:"string"`
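// A small sketch of building the email-address grantee documented above; as the
// comment notes, email grantees are only honored in the listed Regions. The grant
// would normally be attached to an AccessControlPolicy for a PutBucketAcl or
// PutObjectAcl request; the email address here is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	grant := &s3.Grant{
		Grantee: &s3.Grantee{
			Type:         aws.String(s3.TypeAmazonCustomerByEmail),
			EmailAddress: aws.String("grantee@example.com"),
		},
		Permission: aws.String(s3.PermissionRead),
	}
	fmt.Println(grant) // the generated types implement String() for readable output
}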
// The canonical user ID of the grantee.
@@ -14128,6 +18648,8 @@ func (s *Grantee) SetURI(v string) *Grantee {
type HeadBucketInput struct {
_ struct{} `locationName:"HeadBucketRequest" type:"structure"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@@ -14171,6 +18693,20 @@ func (s *HeadBucketInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *HeadBucketInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type HeadBucketOutput struct {
_ struct{} `type:"structure"`
}
@@ -14188,6 +18724,8 @@ func (s HeadBucketOutput) GoString() string {
type HeadObjectInput struct {
_ struct{} `locationName:"HeadObjectRequest" type:"structure"`
+ // The name of the bucket containing the object.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -14207,6 +18745,8 @@ type HeadObjectInput struct {
// otherwise return a 412 (precondition failed).
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+ // The object key.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -14217,28 +18757,32 @@ type HeadObjectInput struct {
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
// Downloads the specified range bytes of an object. For more information about
- // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
Range *string `location:"header" locationName:"Range" type:"string"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// VersionId used to reference a specific version of the object.
@@ -14369,9 +18913,24 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
return s
}
+func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *HeadObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type HeadObjectOutput struct {
_ struct{} `type:"structure"`
+ // Indicates that a range of bytes was specified.
AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
// Specifies caching behavior along the request/reply chain.
@@ -14399,11 +18958,11 @@ type HeadObjectOutput struct {
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
// An ETag is an opaque identifier assigned by a web server to a specific version
- // of a resource found at a URL
+ // of a resource found at a URL.
ETag *string `location:"header" locationName:"ETag" type:"string"`
// If the object expiration is configured (see PUT Bucket lifecycle), the response
- // includes this header. It includes the expiry-date and rule-id key value pairs
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
// providing object expiration information. The value of the rule-id is URL
// encoded.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
@@ -14415,6 +18974,10 @@ type HeadObjectOutput struct {
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
// A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized format:
+ // the first letter and any letter following a hyphen will be capitalized, and the rest left as lowercase.
+ // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// This is set to the number of metadata entries not returned in x-amz-meta
@@ -14423,26 +18986,70 @@ type HeadObjectOutput struct {
// you can create metadata whose values are not legal HTTP headers.
MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
- // The Legal Hold status for the specified object.
+ // Specifies whether a legal hold is in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectLegalHold permission.
+ // This header is not returned if the specified version of this object has never
+ // had a legal hold applied. For more information about S3 Object Lock, see
+ // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // The object lock mode currently in place for this object.
+ // The Object Lock mode, if any, that's in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
- // The date and time when this object's object lock expires.
+ // The date and time when the Object Lock retention period expires. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// The count of parts this object has.
PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or destination in a replication rule.
+ //
+ // In replication, you have a source bucket on which you configure replication
+ // and destination bucket where Amazon S3 stores object replicas. When you request
+ // an object (GetObject) or object metadata (HeadObject) from these buckets,
+ // Amazon S3 will return the x-amz-replication-status header in the response
+ // as follows:
+ //
+ // * If requesting an object from the source bucket — Amazon S3 will return
+ // the x-amz-replication-status header if the object in your request is eligible
+ // for replication. For example, suppose that in your replication configuration,
+ // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects
+ // with key prefix TaxDocs. Any objects you upload with this key name prefix,
+ // for example TaxDocs/document1.pdf, are eligible for replication. For any
+ // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
+ // header with value PENDING, COMPLETED or FAILED indicating object replication
+ // status.
+ //
+ // * If requesting an object from the destination bucket — Amazon S3 will
+ // return the x-amz-replication-status header with value REPLICA if the object
+ // in your request is a replica that Amazon S3 created.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
// If present, indicates that the requester was successfully charged for the
// request.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
- // Provides information about object restoration operation and expiration time
- // of the restored object copy.
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+ // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html))
+ // or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value ongoing-request="true".
+ //
+ // For more information about archiving objects, see Transitioning Objects:
+ // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
@@ -14451,18 +19058,25 @@ type HeadObjectOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // If the object is stored using server-side encryption either with an AWS KMS
+ // customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ // includes this header with the value of the server-side encryption algorithm
+ // used when storing this object in Amazon S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+ // Provides storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
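// A brief sketch of a HeadObject call reading the response headers documented above
// (replication status, restore progress, server-side encryption, and storage class).
// These fields are nil unless the corresponding feature applies, so aws.StringValue
// is used to tolerate nil; bucket and key are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("TaxDocs/document1.pdf"),
	})
	if err != nil {
		fmt.Println("HeadObject failed:", err)
		return
	}

	fmt.Println("replication:", aws.StringValue(out.ReplicationStatus)) // PENDING, COMPLETED, FAILED, or REPLICA
	fmt.Println("restore:", aws.StringValue(out.Restore))               // e.g. ongoing-request="true"
	fmt.Println("sse:", aws.StringValue(out.ServerSideEncryption))      // AES256 or aws:kms
	fmt.Println("storage class:", aws.StringValue(out.StorageClass))    // omitted for S3 Standard objects
}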
// Version of the object.
@@ -14652,13 +19266,15 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu
return s
}
+// Container for the Suffix element.
type IndexDocument struct {
_ struct{} `type:"structure"`
// A suffix that is appended to a request that is for a directory on the website
- // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/
- // the data that is returned will be for the object with the key name images/index.html)
- // The suffix must not be empty and must not include a slash character.
+ // endpoint (for example, if the suffix is index.html and you make a request
+ // to samplebucket/images/, the data that is returned will be for the object
+ // with the key name images/index.html). The suffix must not be empty and must
+ // not include a slash character.
//
// Suffix is a required field
Suffix *string `type:"string" required:"true"`
@@ -14693,6 +19309,7 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
return s
}
+// Container element that identifies who initiated the multipart upload.
type Initiator struct {
_ struct{} `type:"structure"`
@@ -14913,6 +19530,7 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon
return s
}
+// Specifies the inventory configuration for an Amazon S3 bucket.
type InventoryDestination struct {
_ struct{} `type:"structure"`
@@ -14962,10 +19580,10 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin
type InventoryEncryption struct {
_ struct{} `type:"structure"`
- // Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
+ // Specifies the use of SSE-KMS to encrypt delivered inventory reports.
SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"`
- // Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
+ // Specifies the use of SSE-S3 to encrypt delivered inventory reports.
SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"`
}
@@ -15006,6 +19624,8 @@ func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption {
return s
}
+// Specifies an inventory filter. The inventory only includes objects that meet
+// the filter's criteria.
type InventoryFilter struct {
_ struct{} `type:"structure"`
@@ -15044,13 +19664,19 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
return s
}
+// Contains the bucket name, file format, bucket owner (optional), and prefix
+// (optional) where inventory results are published.
type InventoryS3BucketDestination struct {
_ struct{} `type:"structure"`
- // The ID of the account that owns the destination bucket.
+ // The account ID that owns the destination S3 bucket. If no account ID is provided,
+ // the owner is not validated before exporting data.
+ //
+ // Although this value is optional, we strongly recommend that you set it to
+ // help prevent problems if the destination bucket ownership changes.
AccountId *string `type:"string"`
- // The Amazon resource name (ARN) of the bucket where inventory results will
+ // The Amazon Resource Name (ARN) of the bucket where inventory results will
// be published.
//
// Bucket is a required field
@@ -15137,6 +19763,7 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes
return s
}
+// Specifies the schedule for generating inventory results.
type InventorySchedule struct {
_ struct{} `type:"structure"`
@@ -15175,6 +19802,7 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
return s
}
+// Specifies JSON as the object's input serialization format.
type JSONInput struct {
_ struct{} `type:"structure"`
@@ -15198,10 +19826,12 @@ func (s *JSONInput) SetType(v string) *JSONInput {
return s
}
+// Specifies JSON as the request's output serialization format.
type JSONOutput struct {
_ struct{} `type:"structure"`
- // The value used to separate individual records in the output.
+ // The value used to separate individual records in the output. If no value
+ // is specified, Amazon S3 uses a newline character ('\n').
RecordDelimiter *string `type:"string"`
}
@@ -15225,7 +19855,7 @@ func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput {
type KeyFilter struct {
_ struct{} `type:"structure"`
- // A list of containers for the key value pair that defines the criteria for
+ // A list of containers for the key-value pair that defines the criteria for
// the filter rule.
FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
}
@@ -15323,9 +19953,12 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc
return s
}
+// Container for lifecycle rules. You can add as many as 1000 rules.
type LifecycleConfiguration struct {
_ struct{} `type:"structure"`
+ // Specifies lifecycle configuration rules for an Amazon S3 bucket.
+ //
// Rules is a required field
Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
}
@@ -15369,6 +20002,7 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
return s
}
+// Container for the expiration for the lifecycle of the object.
type LifecycleExpiration struct {
_ struct{} `type:"structure"`
@@ -15415,6 +20049,7 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp
return s
}
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
type LifecycleRule struct {
_ struct{} `type:"structure"`
@@ -15425,6 +20060,8 @@ type LifecycleRule struct {
// in the Amazon Simple Storage Service Developer Guide.
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+ // Specifies the expiration for the lifecycle of the object in the form of date,
+ // days, and whether the object has a delete marker.
Expiration *LifecycleExpiration `type:"structure"`
// The Filter is used to identify objects that a Lifecycle Rule applies to.
@@ -15441,6 +20078,11 @@ type LifecycleRule struct {
// period in the object's lifetime.
NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+ // Specifies the transition rule for the lifecycle rule that describes when
+ // noncurrent objects transition to a specific storage class. If your bucket
+ // is versioning-enabled (or versioning is suspended), you can set this action
+ // to request that Amazon S3 transition noncurrent object versions to a specific
+ // storage class at a set period in the object's lifetime.
NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
// Prefix identifying one or more objects to which the rule applies. This is
@@ -15455,6 +20097,7 @@ type LifecycleRule struct {
// Status is a required field
Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+ // Specifies when an Amazon S3 object transitions to a specified storage class.
Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
}
@@ -15546,6 +20189,7 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
type LifecycleRuleAndOperator struct {
_ struct{} `type:"structure"`
+ // Prefix identifying one or more objects to which the rule applies.
Prefix *string `type:"string"`
// All of these tags must exist in the object's tag set in order for the rule
@@ -15718,13 +20362,28 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string)
return s
}
+func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type ListBucketAnalyticsConfigurationsOutput struct {
_ struct{} `type:"structure"`
// The list of analytics configurations for a bucket.
AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"`
- // The ContinuationToken that represents where this request began.
+ // The marker that is used as a starting point for this analytics configuration
+ // list response. This value is present if it was sent in the request.
ContinuationToken *string `type:"string"`
// Indicates whether the returned list of analytics configurations is complete.
@@ -15832,6 +20491,20 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string)
return s
}
+func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type ListBucketInventoryConfigurationsOutput struct {
_ struct{} `type:"structure"`
@@ -15842,8 +20515,9 @@ type ListBucketInventoryConfigurationsOutput struct {
// The list of inventory configurations for a bucket.
InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`
- // Indicates whether the returned list of inventory configurations is truncated
- // in this response. A value of true indicates that the list is truncated.
+ // Tells whether the returned list of inventory configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // is provided for a subsequent request.
IsTruncated *bool `type:"boolean"`
// The marker used to continue this inventory configuration listing. Use the
@@ -15946,6 +20620,20 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L
return s
}
+func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type ListBucketMetricsConfigurationsOutput struct {
_ struct{} `type:"structure"`
@@ -16019,8 +20707,10 @@ func (s ListBucketsInput) GoString() string {
type ListBucketsOutput struct {
_ struct{} `type:"structure"`
+ // The list of buckets owned by the requestor.
Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+ // The owner of the buckets listed.
Owner *Owner `type:"structure"`
}
@@ -16049,10 +20739,26 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
type ListMultipartUploadsInput struct {
_ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Character you use to group keys.
+ //
+ // All keys that contain the same string between the prefix, if specified, and
+ // the first occurrence of the delimiter after the prefix are grouped under
+ // a single result element, CommonPrefixes. If you don't specify the prefix
+ // parameter, then the substring starts at the beginning of the key. The keys
+ // that are grouped under CommonPrefixes result element are not returned elsewhere
+ // in the response.
Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
// Requests Amazon S3 to encode the object keys in the response and specifies
@@ -16065,6 +20771,13 @@ type ListMultipartUploadsInput struct {
// Together with upload-id-marker, this parameter specifies the multipart upload
// after which listing should begin.
+ //
+ // If upload-id-marker is not specified, only the keys lexicographically greater
+ // than the specified key-marker will be included in the list.
+ //
+ // If upload-id-marker is specified, any multipart uploads for a key equal to
+ // the key-marker might also be included, provided those multipart uploads have
+ // upload IDs lexicographically greater than the specified upload-id-marker.
KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
// Sets the maximum number of multipart uploads, from 1 to 1,000, to return
@@ -16073,12 +20786,16 @@ type ListMultipartUploadsInput struct {
MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
// Lists in-progress uploads only for those keys that begin with the specified
- // prefix.
+ // prefix. You can use prefixes to separate a bucket into different groupings
+ // of keys. (You can think of using prefix to make groups in the same way you'd
+ // use a folder in a file system.)
Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
// Together with key-marker, specifies the multipart upload after which listing
// should begin. If key-marker is not specified, the upload-id-marker parameter
- // is ignored.
+ // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker.
UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
}
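// A minimal sketch of listing in-progress multipart uploads with the Prefix and
// Delimiter parameters described above, using the SDK's built-in paginator so
// key-marker and upload-id-marker are advanced automatically. The bucket name and
// prefix are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.ListMultipartUploadsInput{
		Bucket:    aws.String("my-bucket"),
		Prefix:    aws.String("photos/2020/"), // group keys the way a folder would
		Delimiter: aws.String("/"),
	}

	err := svc.ListMultipartUploadsPages(input, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Printf("key=%s uploadId=%s\n", aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		for _, p := range page.CommonPrefixes {
			fmt.Println("common prefix:", aws.StringValue(p.Prefix))
		}
		return true // keep paging until IsTruncated is false
	})
	if err != nil {
		fmt.Println("ListMultipartUploads failed:", err)
	}
}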
@@ -16157,17 +20874,42 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp
return s
}
+func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListMultipartUploadsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type ListMultipartUploadsOutput struct {
_ struct{} `type:"structure"`
// Name of the bucket to which the multipart upload was initiated.
Bucket *string `type:"string"`
+ // If you specify a delimiter in the request, then the result returns each distinct
+ // key prefix containing the delimiter in a CommonPrefixes element. The distinct
+ // key prefixes are returned in the Prefix child element.
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+ // Contains the delimiter you specified in the request. If you don't specify
+ // a delimiter in your request, this element is absent from the response.
Delimiter *string `type:"string"`
// Encoding type used by Amazon S3 to encode object keys in the response.
+ //
+ // If you specify encoding-type request parameter, Amazon S3 includes this element
+ // in the response, and returns encoded key name values in the following response
+ // elements:
+ //
+ // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.
EncodingType *string `type:"string" enum:"EncodingType"`
// Indicates whether the returned list of multipart uploads is truncated. A
@@ -16198,6 +20940,8 @@ type ListMultipartUploadsOutput struct {
// Upload ID after which listing began.
UploadIdMarker *string `type:"string"`
+ // Container for elements related to a particular multipart upload. A response
+ // can contain zero or more Upload elements.
Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
}
@@ -16293,10 +21037,23 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti
type ListObjectVersionsInput struct {
_ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
+ // The bucket name that contains the objects.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // A delimiter is a character you use to group keys.
+ // A delimiter is a character that you specify to group keys. All keys that
+ // contain the same string between the prefix and the first occurrence of the
+ // delimiter are grouped under a single result element in CommonPrefixes. These
+ // groups are counted as one result against the max-keys limitation. These keys
+ // are not returned elsewhere in the response.
Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
// Requests Amazon S3 to encode the object keys in the response and specifies
@@ -16310,11 +21067,19 @@ type ListObjectVersionsInput struct {
// Specifies the key to start with when listing objects in a bucket.
KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
+ // Sets the maximum number of keys returned in the response. By default the
+ // API returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more. If additional keys satisfy the search criteria,
+ // but were not returned because max-keys was exceeded, the response contains
+ // IsTruncated set to true. To return the additional keys, see key-marker
+ // and version-id-marker.
MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
- // Limits the response to keys that begin with the specified prefix.
+ // Use this parameter to select only those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings
+ // of keys. (You can think of using prefix to make groups in the same way you'd
+ // use a folder in a file system.) You can use prefix with delimiter to roll
+ // up numerous objects into a single result under CommonPrefixes.
Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
// Specifies the object version you want to start listing from.
@@ -16396,42 +21161,81 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio
return s
}
+func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListObjectVersionsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
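// Editor's note: an illustrative sketch, not part of the generated SDK source.
// It shows what the getEndpointARN/hasEndpointARN helpers above enable: passing
// an S3 access point ARN in the Bucket field so the SDK routes the request to
// the access point endpoint. The region, account ID, and access point name are
// placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// hasEndpointARN detects the ARN and getEndpointARN parses it, so the
	// request is signed and sent against the access point endpoint.
	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap"),
	})
	if err != nil {
		fmt.Println("ListObjectVersions failed:", err)
		return
	}
	fmt.Println("versions returned:", len(out.Versions))
}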
type ListObjectVersionsOutput struct {
_ struct{} `type:"structure"`
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns.
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+ // Container for an object that is a delete marker.
DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+ // The delimiter grouping the included keys. A delimiter is a character that
+ // you specify to group keys. All keys that contain the same string between
+ // the prefix and the first occurrence of the delimiter are grouped under a
+ // single result element in CommonPrefixes. These groups are counted as one
+ // result against the max-keys limitation. These keys are not returned elsewhere
+ // in the response.
Delimiter *string `type:"string"`
- // Encoding type used by Amazon S3 to encode object keys in the response.
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this element
+ // in the response, and returns encoded key name values in the following response
+ // elements:
+ //
+ // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
EncodingType *string `type:"string" enum:"EncodingType"`
- // A flag that indicates whether or not Amazon S3 returned all of the results
- // that satisfied the search criteria. If your results were truncated, you can
- // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria. If your results were truncated, you can make
+ // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
// response parameters as a starting place in another request to return the
// rest of the results.
IsTruncated *bool `type:"boolean"`
- // Marks the last Key returned in a truncated response.
+ // Marks the last key returned in a truncated response.
KeyMarker *string `type:"string"`
+ // Specifies the maximum number of objects to return.
MaxKeys *int64 `type:"integer"`
+ // Bucket name.
Name *string `type:"string"`
- // Use this value for the key marker request parameter in a subsequent request.
+ // When the number of responses exceeds the value of MaxKeys, NextKeyMarker
+ // specifies the first key not returned that satisfies the search criteria.
+ // Use this value for the key-marker request parameter in a subsequent request.
NextKeyMarker *string `type:"string"`
- // Use this value for the next version id marker parameter in a subsequent request.
+ // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker
+ // specifies the first object version not returned that satisfies the search
+ // criteria. Use this value for the version-id-marker request parameter in a
+ // subsequent request.
NextVersionIdMarker *string `type:"string"`
+ // Selects objects that start with the value supplied by this parameter.
Prefix *string `type:"string"`
+ // Marks the last version of the key returned in a truncated response.
VersionIdMarker *string `type:"string"`
+ // Container for version information.
Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
}
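// Editor's note: an illustrative sketch, not part of the generated SDK source.
// ListObjectVersionsPages drives the NextKeyMarker/NextVersionIdMarker
// pagination described above automatically; each page is a
// *s3.ListObjectVersionsOutput. The bucket name and prefix are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket:  aws.String("example-bucket"),
		Prefix:  aws.String("logs/"),
		MaxKeys: aws.Int64(100),
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s (version %s, latest=%t)\n",
				aws.StringValue(v.Key), aws.StringValue(v.VersionId), aws.BoolValue(v.IsLatest))
		}
		for _, dm := range page.DeleteMarkers {
			fmt.Println("delete marker:", aws.StringValue(dm.Key))
		}
		return true // keep paging while IsTruncated is true
	})
	if err != nil {
		fmt.Println("ListObjectVersionsPages failed:", err)
	}
}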
@@ -16526,6 +21330,8 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe
type ListObjectsInput struct {
_ struct{} `locationName:"ListObjectsRequest" type:"structure"`
+ // The name of the bucket containing the objects.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16543,8 +21349,9 @@ type ListObjectsInput struct {
// Specifies the key to start with when listing objects in a bucket.
Marker *string `location:"querystring" locationName:"marker" type:"string"`
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
+ // Sets the maximum number of keys returned in the response. By default the
+ // API returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
// Limits the response to keys that begin with the specified prefix.
@@ -16631,37 +21438,77 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
return s
}
+func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListObjectsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type ListObjectsOutput struct {
_ struct{} `type:"structure"`
+ // All of the keys rolled up in a common prefix count as a single return when
+ // calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by the delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+ // Metadata about each object returned.
Contents []*Object `type:"list" flattened:"true"`
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
Delimiter *string `type:"string"`
// Encoding type used by Amazon S3 to encode object keys in the response.
EncodingType *string `type:"string" enum:"EncodingType"`
- // A flag that indicates whether or not Amazon S3 returned all of the results
- // that satisfied the search criteria.
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria.
IsTruncated *bool `type:"boolean"`
+ // Indicates where in the bucket listing begins. Marker is included in the response
+ // if it was sent with the request.
Marker *string `type:"string"`
+ // The maximum number of keys returned in the response body.
MaxKeys *int64 `type:"integer"`
+ // Bucket name.
Name *string `type:"string"`
// When response is truncated (the IsTruncated element value in the response
// is true), you can use the key name in this field as marker in the subsequent
// request to get the next set of objects. Amazon S3 lists objects in alphabetical
// order. Note: This element is returned only if you have the delimiter request parameter
- // specified. If response does not include the NextMaker and it is truncated,
+ // specified. If response does not include the NextMarker and it is truncated,
// you can use the value of the last Key in the response as the marker in the
// subsequent request to get the next set of object keys.
NextMarker *string `type:"string"`
+ // Keys that begin with the indicated prefix.
Prefix *string `type:"string"`
}
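// Editor's note: an illustrative sketch, not part of the generated SDK source.
// It demonstrates the NextMarker behavior documented above for ListObjects:
// NextMarker is returned only when a delimiter is supplied, so a manual
// pagination loop falls back to the last key otherwise. The bucket name is a
// placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	in := &s3.ListObjectsInput{
		Bucket:    aws.String("example-bucket"),
		Delimiter: aws.String("/"),
	}
	for {
		out, err := svc.ListObjects(in)
		if err != nil {
			fmt.Println("ListObjects failed:", err)
			return
		}
		for _, cp := range out.CommonPrefixes {
			fmt.Println("prefix:", aws.StringValue(cp.Prefix))
		}
		for _, obj := range out.Contents {
			fmt.Println("key:", aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(out.IsTruncated) {
			return
		}
		if out.NextMarker != nil {
			in.Marker = out.NextMarker // present because a delimiter was set
		} else if n := len(out.Contents); n > 0 {
			in.Marker = out.Contents[n-1].Key // otherwise use the last key returned
		} else {
			return
		}
	}
}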
@@ -16738,14 +21585,21 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
type ListObjectsV2Input struct {
_ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
- // Name of the bucket to list.
+ // Bucket name to list.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// ContinuationToken indicates Amazon S3 that the list is being continued on
// this bucket with a token. ContinuationToken is obfuscated and is not a real
- // key
+ // key.
ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
// A delimiter is a character you use to group keys.
@@ -16756,11 +21610,12 @@ type ListObjectsV2Input struct {
// The owner field is not present in listV2 by default, if you want to return
// owner field with each key in the result then set the fetch owner field to
- // true
+ // true.
FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
+ // Sets the maximum number of keys returned in the response. By default the
+ // API returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
// Limits the response to keys that begin with the specified prefix.
@@ -16772,7 +21627,7 @@ type ListObjectsV2Input struct {
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
- // listing after this specified key. StartAfter can be any key in the bucket
+ // listing after this specified key. StartAfter can be any key in the bucket.
StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
}
@@ -16863,29 +21718,65 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
return s
}
+func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListObjectsV2Input) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type ListObjectsV2Output struct {
_ struct{} `type:"structure"`
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
// CommonPrefixes contains all (if there are any) keys between Prefix and the
- // next occurrence of the string specified by delimiter
+ // next occurrence of the string specified by a delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
// Metadata about each object returned.
Contents []*Object `type:"list" flattened:"true"`
- // ContinuationToken indicates Amazon S3 that the list is being continued on
- // this bucket with a token. ContinuationToken is obfuscated and is not a real
- // key
+ // If ContinuationToken was sent with the request, it is included in the response.
ContinuationToken *string `type:"string"`
- // A delimiter is a character you use to group keys.
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
Delimiter *string `type:"string"`
- // Encoding type used by Amazon S3 to encode object keys in the response.
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter.
EncodingType *string `type:"string" enum:"EncodingType"`
- // A flag that indicates whether or not Amazon S3 returned all of the results
- // that satisfied the search criteria.
+ // Set to false if all of the results were returned. Set to true if more keys
+ // are available to return. If the number of results exceeds that specified
+ // by MaxKeys, all of the results might not be returned.
IsTruncated *bool `type:"boolean"`
// KeyCount is the number of keys returned with this request. KeyCount will
@@ -16893,24 +21784,31 @@ type ListObjectsV2Output struct {
// result will include less than or equal to 50 keys
KeyCount *int64 `type:"integer"`
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
+ // Sets the maximum number of keys returned in the response. By default the
+ // API returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
MaxKeys *int64 `type:"integer"`
- // Name of the bucket to list.
+ // Bucket name.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
Name *string `type:"string"`
- // NextContinuationToken is sent when isTruncated is true which means there
+ // NextContinuationToken is sent when isTruncated is true, which means there
// are more keys in the bucket that can be listed. The next list requests to
// Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
// is obfuscated and is not a real key
NextContinuationToken *string `type:"string"`
- // Limits the response to keys that begin with the specified prefix.
+ // Keys that begin with the indicated prefix.
Prefix *string `type:"string"`
- // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
- // listing after this specified key. StartAfter can be any key in the bucket
+ // If StartAfter was sent with the request, it is included in the response.
StartAfter *string `type:"string"`
}
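// Editor's note: an illustrative sketch, not part of the generated SDK source.
// It shows the Prefix/Delimiter rollup into CommonPrefixes and lets
// ListObjectsV2Pages handle NextContinuationToken as described above. The
// bucket name and prefix are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket:    aws.String("example-bucket"),
		Prefix:    aws.String("notes/"),
		Delimiter: aws.String("/"), // keys under notes/summer/... roll up into notes/summer/
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, cp := range page.CommonPrefixes {
			fmt.Println("common prefix:", aws.StringValue(cp.Prefix))
		}
		for _, obj := range page.Contents {
			fmt.Printf("%s (%d bytes)\n", aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // the SDK sends NextContinuationToken for the next page
	})
	if err != nil {
		fmt.Println("ListObjectsV2Pages failed:", err)
	}
}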
@@ -16999,9 +21897,20 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
type ListPartsInput struct {
_ struct{} `locationName:"ListPartsRequest" type:"structure"`
+ // Name of the bucket to which the parts are being uploaded.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Object key for which the multipart upload was initiated.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -17012,10 +21921,11 @@ type ListPartsInput struct {
// part numbers will be listed.
PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Upload ID identifying the multipart upload whose parts are being listed.
@@ -17102,23 +22012,51 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
return s
}
+func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *ListPartsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
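// Editor's note: an illustrative sketch, not part of the generated SDK source.
// ListPartsPages walks the parts of an in-progress multipart upload using the
// part-number-marker pagination described here. The bucket, key, and upload ID
// are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("backups/archive.tar"),
		UploadId: aws.String("example-upload-id"),
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Printf("part %d: %d bytes, etag %s\n",
				aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size), aws.StringValue(p.ETag))
		}
		return true
	})
	if err != nil {
		fmt.Println("ListPartsPages failed:", err)
	}
}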
type ListPartsOutput struct {
_ struct{} `type:"structure"`
- // Date when multipart upload will become eligible for abort operation by lifecycle.
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object
+ // name in the request, then the response includes this header indicating when
+ // the initiated multipart upload will become eligible for an abort operation.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ //
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
- // Id of the lifecycle rule that makes a multipart upload eligible for abort
- // operation.
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
// Name of the bucket to which the multipart upload was initiated.
Bucket *string `type:"string"`
- // Identifies who initiated the multipart upload.
+ // Container element that identifies who initiated the multipart upload. If
+ // the initiator is an AWS account, this element provides the same information
+ // as the Owner element. If the initiator is an IAM User, this element provides
+ // the user ARN and display name.
Initiator *Initiator `type:"structure"`
- // Indicates whether the returned list of parts is truncated.
+ // Indicates whether the returned list of parts is truncated. A true value indicates
+ // that the list was truncated. A list can be truncated if the number of parts
+ // exceeds the limit returned in the MaxParts element.
IsTruncated *bool `type:"boolean"`
// Object key for which the multipart upload was initiated.
@@ -17132,18 +22070,26 @@ type ListPartsOutput struct {
// in a subsequent request.
NextPartNumberMarker *int64 `type:"integer"`
+ // Container element that identifies the object owner after the object is created.
+ // If the multipart upload is initiated by an IAM user, this element provides
+ // the parent account ID and display name.
Owner *Owner `type:"structure"`
- // Part number after which listing begins.
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
PartNumberMarker *int64 `type:"integer"`
+ // Container for elements related to a particular part. A response can contain
+ // zero or more Part elements.
Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
// If present, indicates that the requester was successfully charged for the
// request.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
- // The class of storage used to store the object.
+ // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded
+ // object.
StorageClass *string `type:"string" enum:"StorageClass"`
// Upload ID identifying the multipart upload whose parts are being listed.
@@ -17251,7 +22197,8 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
return s
}
-// Describes an S3 location that will receive the results of the restore request.
+// Describes an Amazon S3 location that will receive the results of the restore
+// request.
type Location struct {
_ struct{} `type:"structure"`
@@ -17266,8 +22213,7 @@ type Location struct {
// The canned ACL to apply to the restore results.
CannedACL *string `type:"string" enum:"ObjectCannedACL"`
- // Describes the server-side encryption that will be applied to the restore
- // results.
+ // Contains the type of server-side encryption used.
Encryption *Encryption `type:"structure"`
// The prefix that is prepended to the restore results for this request.
@@ -17389,13 +22335,14 @@ type LoggingEnabled struct {
// Specifies the bucket where you want Amazon S3 to store server access logs.
// You can have your logs delivered to any bucket that you own, including the
// same bucket that is being logged. You can also configure multiple buckets
- // to deliver their logs to the same target bucket. In this case you should
+ // to deliver their logs to the same target bucket. In this case, you should
// choose a different TargetPrefix for each source bucket so that the delivered
// log files can be distinguished by key.
//
// TargetBucket is a required field
TargetBucket *string `type:"string" required:"true"`
+ // Container for granting information.
TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
// A prefix for all log object keys. If you store log files from multiple Amazon
@@ -17464,8 +22411,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
type MetadataEntry struct {
_ struct{} `type:"structure"`
+ // Name of the Object.
Name *string `type:"string"`
+ // Value of the Object.
Value *string `type:"string"`
}
@@ -17491,6 +22440,65 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry {
return s
}
+// A container specifying replication metrics-related settings enabling metrics
+// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified
+// together with a ReplicationTime block.
+type Metrics struct {
+ _ struct{} `type:"structure"`
+
+ // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold
+ // event.
+ //
+ // EventThreshold is a required field
+ EventThreshold *ReplicationTimeValue `type:"structure" required:"true"`
+
+ // Specifies whether the replication metrics are enabled.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"MetricsStatus"`
+}
+
+// String returns the string representation
+func (s Metrics) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Metrics) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Metrics) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Metrics"}
+ if s.EventThreshold == nil {
+ invalidParams.Add(request.NewErrParamRequired("EventThreshold"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEventThreshold sets the EventThreshold field's value.
+func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics {
+ s.EventThreshold = v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Metrics) SetStatus(v string) *Metrics {
+ s.Status = &v
+ return s
+}
+
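// Editor's note: an illustrative sketch, not part of the generated SDK source.
// It builds the Metrics block introduced above for S3 Replication Time Control
// and checks it with Validate. In a replication configuration this value sits
// on a rule's Destination alongside a ReplicationTime block; the "Enabled"
// status string and the 15-minute threshold are shown only as example values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	metrics := &s3.Metrics{
		Status: aws.String("Enabled"), // MetricsStatus enum value
		EventThreshold: &s3.ReplicationTimeValue{
			// Emit s3:Replication:OperationMissedThreshold after this many minutes.
			Minutes: aws.Int64(15),
		},
	}
	if err := metrics.Validate(); err != nil {
		fmt.Println("invalid Metrics configuration:", err)
		return
	}
	fmt.Println("Metrics configuration is valid")
}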
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates, and an object
+// must match all of the predicates in order for the filter to apply.
type MetricsAndOperator struct {
_ struct{} `type:"structure"`
@@ -17604,6 +22612,9 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
return s
}
+// Specifies a metrics configuration filter. The metrics configuration only
+// includes objects that meet the filter's criteria. A filter must be a prefix,
+// a tag, or a conjunction (MetricsAndOperator).
type MetricsFilter struct {
_ struct{} `type:"structure"`
@@ -17667,6 +22678,7 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
return s
}
+// Container for the MultipartUpload for the Amazon S3 object.
type MultipartUpload struct {
_ struct{} `type:"structure"`
@@ -17679,6 +22691,7 @@ type MultipartUpload struct {
// Key of the object for which the multipart upload was initiated.
Key *string `min:"1" type:"string"`
+ // Specifies the owner of the object that is part of the multipart upload.
Owner *Owner `type:"structure"`
// The class of storage used to store the object.
@@ -17778,8 +22791,8 @@ type NoncurrentVersionTransition struct {
// Specifies the number of days an object is noncurrent before Amazon S3 can
// perform the associated action. For information about the noncurrent days
- // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+ // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
// in the Amazon Simple Storage Service Developer Guide.
NoncurrentDays *int64 `type:"integer"`
@@ -17898,10 +22911,17 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati
type NotificationConfigurationDeprecated struct {
_ struct{} `type:"structure"`
+ // Container for specifying the AWS Lambda notification configuration.
CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
+ // This data type is deprecated. It specifies the configuration
+ // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue
+ // when Amazon S3 detects specified events.
QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
+ // This data type is deprecated. A container for specifying the configuration
+ // for publication of messages to an Amazon Simple Notification Service (Amazon
+ // SNS) topic when Amazon S3 detects specified events.
TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
}
@@ -17959,17 +22979,39 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf
return s
}
+// An object consists of data and its descriptive metadata.
type Object struct {
_ struct{} `type:"structure"`
+ // The entity tag is a hash of the object. The ETag reflects changes only to
+ // the contents of an object, not its metadata. The ETag may or may not be an
+ // MD5 digest of the object data. Whether or not it is depends on how the object
+ // was created and how it is encrypted as described below:
+ //
+ // * Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the AWS Management Console, and encrypted by SSE-S3 or plaintext,
+ // have ETags that are an MD5 digest of their object data.
+ //
+ // * Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the AWS Management Console, and encrypted by SSE-C or SSE-KMS,
+ // have ETags that are not an MD5 digest of their object data.
+ //
+ // * If an object is created by either the Multipart Upload or Part Copy
+ // operation, the ETag is not an MD5 digest, regardless of the method of
+ // encryption.
ETag *string `type:"string"`
+ // The name that you assign to an object. You use the object key to retrieve
+ // the object.
Key *string `min:"1" type:"string"`
+ // The date the object was last modified.
LastModified *time.Time `type:"timestamp"`
+ // The owner of the object
Owner *Owner `type:"structure"`
+ // Size in bytes of the object
Size *int64 `type:"integer"`
// The class of storage used to store the object.
@@ -18022,6 +23064,7 @@ func (s *Object) SetStorageClass(v string) *Object {
return s
}
+// Object Identifier is a unique value to identify objects.
type ObjectIdentifier struct {
_ struct{} `type:"structure"`
@@ -18072,14 +23115,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
return s
}
-// The container element for object lock configuration parameters.
+// The container element for Object Lock configuration parameters.
type ObjectLockConfiguration struct {
_ struct{} `type:"structure"`
- // Indicates whether this bucket has an object lock configuration enabled.
+ // Indicates whether this bucket has an Object Lock configuration enabled.
ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
- // The object lock rule in place for the specified object.
+ // The Object Lock rule in place for the specified object.
Rule *ObjectLockRule `type:"structure"`
}
@@ -18136,7 +23179,7 @@ type ObjectLockRetention struct {
// Indicates the Retention mode for the specified object.
Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
- // The date on which this object lock retention expires.
+ // The date on which this Object Lock Retention will expire.
RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
}
@@ -18162,7 +23205,7 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti
return s
}
-// The container element for an object lock rule.
+// The container element for an Object Lock rule.
type ObjectLockRule struct {
_ struct{} `type:"structure"`
@@ -18187,9 +23230,11 @@ func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRul
return s
}
+// The version of an object.
type ObjectVersion struct {
_ struct{} `type:"structure"`
+ // The entity tag is an MD5 hash of that version of the object.
ETag *string `type:"string"`
// Specifies whether the object is (true) or is not (false) the latest version
@@ -18202,6 +23247,7 @@ type ObjectVersion struct {
// Date and time the object was last modified.
LastModified *time.Time `type:"timestamp"`
+ // Specifies the owner of the object.
Owner *Owner `type:"structure"`
// Size in bytes of the object.
@@ -18344,11 +23390,14 @@ func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization {
return s
}
+// Container for the owner's display name and ID.
type Owner struct {
_ struct{} `type:"structure"`
+ // Container for the display name of the owner.
DisplayName *string `type:"string"`
+ // Container for the ID of the owner.
ID *string `type:"string"`
}
@@ -18374,6 +23423,7 @@ func (s *Owner) SetID(v string) *Owner {
return s
}
+// Container for Parquet.
type ParquetInput struct {
_ struct{} `type:"structure"`
}
@@ -18388,6 +23438,7 @@ func (s ParquetInput) GoString() string {
return s.String()
}
+// Container for elements related to a part.
type Part struct {
_ struct{} `type:"structure"`
@@ -18464,6 +23515,7 @@ func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus {
return s
}
+// This data type contains information about progress of an operation.
type Progress struct {
_ struct{} `type:"structure"`
@@ -18505,6 +23557,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress {
return s
}
+// This data type contains information about the progress event of an operation.
type ProgressEvent struct {
_ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
@@ -18545,7 +23598,23 @@ func (s *ProgressEvent) UnmarshalEvent(
return nil
}
-// Specifies the Block Public Access configuration for an Amazon S3 bucket.
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon
+// S3 bucket. You can enable the configuration options in any combination. For
+// more information about when Amazon S3 considers a bucket or object public,
+// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon Simple Storage Service Developer Guide.
type PublicAccessBlockConfiguration struct {
_ struct{} `type:"structure"`
@@ -18558,6 +23627,8 @@ type PublicAccessBlockConfiguration struct {
//
// * PUT Object calls fail if the request includes a public ACL.
//
+ // * PUT Bucket calls fail if the request includes a public ACL.
+ //
// Enabling this setting doesn't affect existing policies or ACLs.
BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
@@ -18624,7 +23695,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi
type PutBucketAccelerateConfigurationInput struct {
_ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`
- // Specifies the Accelerate Configuration you want to set for the bucket.
+ // Container for setting the transfer acceleration state.
//
// AccelerateConfiguration is a required field
AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
@@ -18683,6 +23754,20 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) {
return *s.Bucket
}
+func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketAccelerateConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -18706,6 +23791,8 @@ type PutBucketAclInput struct {
// Contains the elements that set the ACL permissions for an object per grantee.
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+ // The bucket to which to apply the ACL.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -18812,6 +23899,20 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
return s
}
+func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketAclInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketAclOutput struct {
_ struct{} `type:"structure"`
}
@@ -18907,6 +24008,20 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti
return s
}
+func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketAnalyticsConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -18924,6 +24039,8 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
type PutBucketCorsInput struct {
_ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
+ // Specifies the bucket impacted by the CORS configuration.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -18989,6 +24106,20 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck
return s
}
+func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketCorsInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketCorsOutput struct {
_ struct{} `type:"structure"`
}
@@ -19007,9 +24138,9 @@ type PutBucketEncryptionInput struct {
_ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"`
// Specifies default encryption for a bucket using server-side encryption with
- // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
- // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket
- // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+ // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
+ // (SSE-KMS). For information about the Amazon S3 default encryption feature,
+ // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
@@ -19074,6 +24205,20 @@ func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *Serve
return s
}
+func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketEncryptionInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketEncryptionOutput struct {
_ struct{} `type:"structure"`
}
@@ -19169,6 +24314,20 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve
return s
}
+func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketInventoryConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -19186,12 +24345,12 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string {
type PutBucketLifecycleConfigurationInput struct {
_ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
+ // The name of the bucket for which to set the configuration.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
- // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // Container for lifecycle rules. You can add as many as 1,000 rules.
LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -19245,6 +24404,20 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck
return s
}
+func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketLifecycleConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -19265,6 +24438,7 @@ type PutBucketLifecycleInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Container for lifecycle rules. You can add as many as 1000 rules.
LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -19318,6 +24492,20 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur
return s
}
+func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketLifecycleInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
}
@@ -19335,9 +24523,13 @@ func (s PutBucketLifecycleOutput) GoString() string {
type PutBucketLoggingInput struct {
_ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"`
+ // The name of the bucket for which to set the logging parameters.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Container for logging status information.
+ //
// BucketLoggingStatus is a required field
BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -19395,6 +24587,20 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *
return s
}
+func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketLoggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketLoggingOutput struct {
_ struct{} `type:"structure"`
}
@@ -19490,6 +24696,20 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC
return s
}
+func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketMetricsConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -19507,6 +24727,8 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string {
type PutBucketNotificationConfigurationInput struct {
_ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"`
+ // The name of the bucket.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19570,6 +24792,20 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v
return s
}
+func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketNotificationConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -19587,9 +24823,13 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string {
type PutBucketNotificationInput struct {
_ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`
+ // The name of the bucket.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // The container for the configuration.
+ //
// NotificationConfiguration is a required field
NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -19642,6 +24882,20 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio
return s
}
+func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketNotificationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketNotificationOutput struct {
_ struct{} `type:"structure"`
}
@@ -19659,6 +24913,8 @@ func (s PutBucketNotificationOutput) GoString() string {
type PutBucketPolicyInput struct {
_ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`
+ // The name of the bucket.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19726,6 +24982,20 @@ func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
return s
}
+func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketPolicyInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketPolicyOutput struct {
_ struct{} `type:"structure"`
}
@@ -19743,6 +25013,8 @@ func (s PutBucketPolicyOutput) GoString() string {
type PutBucketReplicationInput struct {
_ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
+ // The name of the bucket
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19752,7 +25024,6 @@ type PutBucketReplicationInput struct {
// ReplicationConfiguration is a required field
ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // A token that allows Amazon S3 object lock to be enabled for an existing bucket.
Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
}
@@ -19815,6 +25086,20 @@ func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInpu
return s
}
+func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketReplicationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketReplicationOutput struct {
_ struct{} `type:"structure"`
}
@@ -19832,9 +25117,13 @@ func (s PutBucketReplicationOutput) GoString() string {
type PutBucketRequestPaymentInput struct {
_ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Container for Payer.
+ //
// RequestPaymentConfiguration is a required field
RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -19892,6 +25181,20 @@ func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request
return s
}
+func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketRequestPaymentOutput struct {
_ struct{} `type:"structure"`
}
@@ -19909,9 +25212,13 @@ func (s PutBucketRequestPaymentOutput) GoString() string {
type PutBucketTaggingInput struct {
_ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Container for the TagSet and Tag elements.
+ //
// Tagging is a required field
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -19969,6 +25276,20 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
return s
}
+func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketTaggingOutput struct {
_ struct{} `type:"structure"`
}
@@ -19986,6 +25307,8 @@ func (s PutBucketTaggingOutput) GoString() string {
type PutBucketVersioningInput struct {
_ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19993,9 +25316,7 @@ type PutBucketVersioningInput struct {
// and the value that is displayed on your authentication device.
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
- // Describes the versioning state of an Amazon S3 bucket. For more information,
- // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
- // in the Amazon Simple Storage Service API Reference.
+ // Container for setting the versioning state.
//
// VersioningConfiguration is a required field
VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
@@ -20055,6 +25376,20 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi
return s
}
+func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketVersioningInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketVersioningOutput struct {
_ struct{} `type:"structure"`
}
@@ -20072,10 +25407,12 @@ func (s PutBucketVersioningOutput) GoString() string {
type PutBucketWebsiteInput struct {
_ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Specifies website configuration parameters for an Amazon S3 bucket.
+ // Container for the request.
//
// WebsiteConfiguration is a required field
WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
@@ -20134,6 +25471,20 @@ func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration)
return s
}
+func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketWebsiteInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
}
@@ -20151,12 +25502,23 @@ func (s PutBucketWebsiteOutput) GoString() string {
type PutObjectAclInput struct {
_ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`
- // The canned ACL to apply to the object.
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// Contains the elements that set the ACL permissions for an object per grantee.
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+ // The bucket name that contains the object to which you want to attach the
+ // ACL.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -20176,13 +25538,16 @@ type PutObjectAclInput struct {
// Allows grantee to write the ACL for the applicable bucket.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+ // Key for which the PUT operation was initiated.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// VersionId used to reference a specific version of the object.
@@ -20299,6 +25664,20 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
return s
}
+func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectAclInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
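
As a usage sketch of the canned-ACL and access-point documentation added above (not part of this patch): applying a canned ACL to an existing object with the upgraded client. The bucket and key names are hypothetical.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Grant public read on a single object via a canned ACL; an access point
	// ARN could be supplied in Bucket instead of the bucket name.
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("my-bucket"),       // hypothetical bucket
		Key:    aws.String("docs/report.pdf"), // hypothetical key
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
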
type PutObjectAclOutput struct {
_ struct{} `type:"structure"`
@@ -20326,44 +25705,62 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
type PutObjectInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
- // The canned ACL to apply to the object.
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// Object data.
Body io.ReadSeeker `type:"blob"`
- // Name of the bucket to which the PUT operation was initiated.
+ // Bucket name to which the PUT operation was initiated.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Specifies caching behavior along the request/reply chain.
+ // Can be used to specify caching behavior along the request/reply chain. For
+ // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
- // Specifies presentational information for the object.
+ // Specifies presentational information for the object. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
// Specifies what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
- // by the Content-Type header field.
+ // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
// The language the content is in.
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
// Size of the body in bytes. This parameter is useful when the size of the
- // body cannot be determined automatically.
+ // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13).
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
- // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
- // auto-populated when using the command from the CLI. This parameted is required
- // if object lock parameters are specified.
+ // The base64-encoded 128-bit MD5 digest of the message (without the headers)
+ // according to RFC 1864. This header can be used as a message integrity check
+ // to verify that the data is the same data that was originally sent. Although
+ // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
+ // integrity check. For more information about REST request authentication,
+ // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
- // A standard MIME type describing the format of the object data.
+ // A standard MIME type describing the format of the contents. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
- // The date and time at which the object is no longer cacheable.
+ // The date and time at which the object is no longer cacheable. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
@@ -20386,34 +25783,37 @@ type PutObjectInput struct {
// A map of metadata to store with the object in S3.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
- // The Legal Hold status that you want to apply to the specified object.
+ // Specifies whether a legal hold will be applied to this object. For more information
+ // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // The object lock mode that you want to apply to this object.
+ // The Object Lock mode that you want to apply to this object.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
- // The date and time when you want this object's object lock to expire.
+ // The date and time when you want this object's Object Lock to expire.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the AWS KMS Encryption Context to use for object encryption. The
@@ -20421,17 +25821,24 @@ type PutObjectInput struct {
// encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- // requests for an object protected by AWS KMS will fail if not made via SSL
- // or using SigV4. Documentation on configuring any of the officially supported
- // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+ // the ID of the symmetric customer managed AWS KMS CMK that will be used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+ // provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+ // managed CMK in AWS KMS to protect the data.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
- // The type of storage to use for the object. Defaults to 'STANDARD'.
+ // If you don't specify a storage class, Amazon S3 uses the S3 Standard storage
+ // class by default. Amazon S3 supports other storage classes.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
@@ -20440,7 +25847,22 @@ type PutObjectInput struct {
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
- // the value of this header in the object metadata.
+ // the value of this header in the object metadata. For information about object
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ //
+ // In the following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket:
+ //
+ // x-amz-website-redirect-location: /anotherPage.html
+ //
+ // In the following example, the request header sets the object redirect to
+ // another website:
+ //
+ // x-amz-website-redirect-location: http://www.example.com/
+ //
+ // For more information about website hosting in Amazon S3, see Hosting Websites
+ // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
+ // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
@@ -20670,10 +26092,32 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
return s
}
+func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
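
A short sketch of the Content-MD5 end-to-end integrity check described in the PutObjectInput documentation above, assuming an already-configured *s3.S3 client (constructed as in the PutObjectAcl sketch); the bucket, key, and payload are illustrative.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putWithMD5 uploads a small payload with a Content-MD5 header computed as the
// base64-encoded MD5 digest of the body, plus SSE-S3 encryption.
func putWithMD5(svc *s3.S3, bucket, key string, body []byte) error {
	sum := md5.Sum(body)
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 bytes.NewReader(body),
		ContentMD5:           aws.String(base64.StdEncoding.EncodeToString(sum[:])),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
	})
	return err
}
```
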
type PutObjectLegalHoldInput struct {
_ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`
- // The bucket containing the object that you want to place a Legal Hold on.
+ // The bucket name containing the object that you want to place a Legal Hold
+ // on.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -20687,10 +26131,11 @@ type PutObjectLegalHoldInput struct {
// specified object.
LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The version ID of the object that you want to place a Legal Hold on.
@@ -20766,6 +26211,20 @@ func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInpu
return s
}
+func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectLegalHoldInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutObjectLegalHoldOutput struct {
_ struct{} `type:"structure"`
@@ -20793,21 +26252,22 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo
type PutObjectLockConfigurationInput struct {
_ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"`
- // The bucket whose object lock configuration you want to create or replace.
+ // The bucket whose Object Lock configuration you want to create or replace.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The object lock configuration that you want to apply to the specified bucket.
+ // The Object Lock configuration that you want to apply to the specified bucket.
ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // A token to allow Amazon S3 object lock to be enabled for an existing bucket.
+ // A token to allow Object Lock to be enabled for an existing bucket.
Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
}
@@ -20868,6 +26328,20 @@ func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfi
return s
}
+func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
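
A hedged sketch of applying the Object Lock configuration documented above: enabling Object Lock with a 30-day GOVERNANCE default retention rule. svc is an already-configured *s3.S3 client and the bucket name is hypothetical.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableDefaultRetention applies a 30-day GOVERNANCE-mode default retention
// rule to a bucket that was created with Object Lock enabled.
func enableDefaultRetention(svc *s3.S3) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-locked-bucket"), // hypothetical bucket
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	return err
}
```
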
type PutObjectLockConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -20898,8 +26372,11 @@ type PutObjectOutput struct {
// Entity tag for the uploaded object.
ETag *string `location:"header" locationName:"ETag" type:"string"`
- // If the object expiration is configured, this will contain the expiration
- // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ // If the expiration is configured for the object (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs that provide information about object expiration. The value
+ // of the rule-id is URL encoded.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
// If present, indicates that the requester was successfully charged for the
@@ -20912,7 +26389,7 @@ type PutObjectOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
@@ -20921,12 +26398,16 @@ type PutObjectOutput struct {
// the encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // If you specified server-side encryption either with an AWS KMS customer master
+ // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response
+ // includes this header. It confirms the encryption algorithm that Amazon S3
+ // used to encrypt the object.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// Version of the object.
@@ -21000,13 +26481,20 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
type PutObjectRetentionInput struct {
_ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"`
- // The bucket that contains the object you want to apply this Object Retention
+ // The bucket name that contains the object you want to apply this Object Retention
// configuration to.
//
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates whether this operation should bypass Governance-mode restrictions.j
+ // Indicates whether this operation should bypass Governance-mode restrictions.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
// The key name for the object that you want to apply this Object Retention
@@ -21015,10 +26503,11 @@ type PutObjectRetentionInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The container element for the Object Retention configuration.
@@ -21104,6 +26593,20 @@ func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInpu
return s
}
+func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectRetentionInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
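
Similarly, a sketch of the per-object retention API documented above: placing a COMPLIANCE-mode retention date on a specific object version. svc, bucket, key, and versionID are assumed inputs.

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// retainForOneYear keeps an object version immutable for one year from now.
func retainForOneYear(svc *s3.S3, bucket, key, versionID string) error {
	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String(s3.ObjectLockRetentionModeCompliance),
			RetainUntilDate: aws.Time(time.Now().AddDate(1, 0, 0)),
		},
	})
	return err
}
```
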
type PutObjectRetentionOutput struct {
_ struct{} `type:"structure"`
@@ -21131,15 +26634,29 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti
type PutObjectTaggingInput struct {
_ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`
+ // The bucket name containing the object.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Name of the object key.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+ // Container for the TagSet and Tag elements
+ //
// Tagging is a required field
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+ // The versionId of the object that the tag-set will be added to.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -21214,9 +26731,24 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
return s
}
+func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
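
A sketch of the Tagging container described above (a TagSet of Tag key/value pairs); svc is an already-configured *s3.S3 client and the tag values are illustrative.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagObject replaces the tag-set on an existing object.
func tagObject(svc *s3.S3, bucket, key string) error {
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("demo")}, // illustrative tag
			},
		},
	})
	return err
}
```
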
type PutObjectTaggingOutput struct {
_ struct{} `type:"structure"`
+ // The versionId of the object the tag-set was added to.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
@@ -21303,6 +26835,20 @@ func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicA
return s
}
+func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutPublicAccessBlockInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type PutPublicAccessBlockOutput struct {
_ struct{} `type:"structure"`
}
@@ -21322,6 +26868,8 @@ func (s PutPublicAccessBlockOutput) GoString() string {
type QueueConfiguration struct {
_ struct{} `type:"structure"`
+ // A collection of bucket events for which to send notifications
+ //
// Events is a required field
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
@@ -21391,6 +26939,10 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
return s
}
+// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html)
+// for the same purposes. This data type specifies the configuration for publishing
+// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon
+// S3 detects specified events.
type QueueConfigurationDeprecated struct {
_ struct{} `type:"structure"`
@@ -21399,12 +26951,15 @@ type QueueConfigurationDeprecated struct {
// Deprecated: Event has been deprecated
Event *string `deprecated:"true" type:"string" enum:"Event"`
+ // A collection of bucket events for which to send notifications
Events []*string `locationName:"Event" type:"list" flattened:"true"`
// An optional unique identifier for configurations in a notification configuration.
// If you don't provide one, Amazon S3 will assign an ID.
Id *string `type:"string"`
+ // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+ // publishes a message when it detects events of the specified type.
Queue *string `type:"string"`
}
@@ -21442,6 +26997,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep
return s
}
+// The container for the records event.
type RecordsEvent struct {
_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
@@ -21481,6 +27037,15 @@ func (s *RecordsEvent) UnmarshalEvent(
return nil
}
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream"))
+ msg.Payload = s.Payload
+ return msg, err
+}
+
// Specifies how requests are redirected. In the event of an error, you can
// specify a different error code to return.
type Redirect struct {
@@ -21608,7 +27173,7 @@ type ReplicationConfiguration struct {
// The Amazon Resource Name (ARN) of the AWS Identity and Access Management
// (IAM) role that Amazon S3 assumes when replicating objects. For more information,
- // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html)
+ // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
// in the Amazon Simple Storage Service Developer Guide.
//
// Role is a required field
@@ -21673,14 +27238,30 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
type ReplicationRule struct {
_ struct{} `type:"structure"`
- // Specifies whether Amazon S3 should replicate delete makers.
+ // Specifies whether Amazon S3 replicates the delete markers. If you specify
+ // a Filter, you must specify this element. However, in the latest version of
+ // replication configuration (when Filter is specified), Amazon S3 doesn't replicate
+ // delete markers. Therefore, the DeleteMarkerReplication element can contain
+ // only Disabled. For an example configuration, see Basic Rule
+ // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+ //
+ // If you don't specify the Filter element, Amazon S3 assumes that the replication
+ // configuration is the earlier version, V1. In the earlier version, Amazon
+ // S3 handled replication of delete markers differently. For more information,
+ // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
- // A container for information about the replication destination.
+ // A container for information about the replication destination and its configurations
+ // including enabling the S3 Replication Time Control (S3 RTC).
//
// Destination is a required field
Destination *Destination `type:"structure" required:"true"`
+ // Optional configuration to replicate existing source bucket objects. For more
+ // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
+ // in the Amazon S3 Developer Guide.
+ ExistingObjectReplication *ExistingObjectReplication `type:"structure"`
+
// A filter that identifies the subset of objects to which the replication rule
// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
Filter *ReplicationRuleFilter `type:"structure"`
@@ -21688,9 +27269,9 @@ type ReplicationRule struct {
// A unique identifier for the rule. The maximum value is 255 characters.
ID *string `type:"string"`
- // An object keyname prefix that identifies the object or objects to which the
- // rule applies. The maximum prefix length is 1,024 characters. To include all
- // objects in a bucket, specify an empty string.
+ // An object key name prefix that identifies the object or objects to which
+ // the rule applies. The maximum prefix length is 1,024 characters. To include
+ // all objects in a bucket, specify an empty string.
//
// Deprecated: Prefix has been deprecated
Prefix *string `deprecated:"true" type:"string"`
@@ -21700,21 +27281,21 @@ type ReplicationRule struct {
// when filtering. If two or more rules identify the same object based on a
// specified filter, the rule with higher priority takes precedence. For example:
//
- // * Same object quality prefix based filter criteria If prefixes you specified
+ // * Same object qualifies under prefix-based filter criteria if prefixes you specified
// in multiple rules overlap
//
- // * Same object qualify tag based filter criteria specified in multiple
+ // * Same object qualifies under tag-based filter criteria specified in multiple
// rules
//
- // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
- // in the Amazon S3 Developer Guide.
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+ // in the Amazon Simple Storage Service Developer Guide.
Priority *int64 `type:"integer"`
// A container that describes additional filters for identifying the source
// objects that you want to replicate. You can choose to enable or disable the
// replication of these objects. Currently, Amazon S3 supports only the filter
// that you can specify for objects created with server-side encryption using
- // an AWS KMS-Managed Key (SSE-KMS).
+ // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).
SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
// Specifies whether the rule is enabled.
@@ -21747,6 +27328,11 @@ func (s *ReplicationRule) Validate() error {
invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
}
}
+ if s.ExistingObjectReplication != nil {
+ if err := s.ExistingObjectReplication.Validate(); err != nil {
+ invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams))
+ }
+ }
if s.Filter != nil {
if err := s.Filter.Validate(); err != nil {
invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
@@ -21776,6 +27362,12 @@ func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
return s
}
+// SetExistingObjectReplication sets the ExistingObjectReplication field's value.
+func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule {
+ s.ExistingObjectReplication = v
+ return s
+}
+
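
To illustrate the new ExistingObjectReplication element and the Filter/DeleteMarkerReplication interaction documented above, a sketch of a V2 (Filter-based) replication rule; the prefix and destination bucket ARN are hypothetical.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// existingObjectRule builds a Filter-based rule that also opts in to
// replicating objects that already existed when the rule was added. With a
// Filter present, DeleteMarkerReplication can only be Disabled.
func existingObjectRule() *s3.ReplicationRule {
	return &s3.ReplicationRule{
		Status:   aws.String(s3.ReplicationRuleStatusEnabled),
		Priority: aws.Int64(1),
		Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
		DeleteMarkerReplication: &s3.DeleteMarkerReplication{
			Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
		},
		ExistingObjectReplication: &s3.ExistingObjectReplication{
			Status: aws.String(s3.ExistingObjectReplicationStatusEnabled),
		},
		Destination: &s3.Destination{
			Bucket: aws.String("arn:aws:s3:::my-replica-bucket"),
		},
	}
}
```
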
// SetFilter sets the Filter field's value.
func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
s.Filter = v
@@ -21812,11 +27404,25 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
return s
}
+// A container for specifying rule filters. The filters determine the subset
+// of objects to which the rule applies. This element is required only if you
+// specify more than one filter.
+//
+// For example:
+//
+// * If you specify both a Prefix and a Tag filter, wrap these filters in
+// an And tag.
+//
+// * If you specify a filter based on multiple tags, wrap the Tag elements
+// in an And tag
type ReplicationRuleAndOperator struct {
_ struct{} `type:"structure"`
+ // An object key name prefix that identifies the subset of objects to which
+ // the rule applies.
Prefix *string `type:"string"`
+ // An array of tags containing key and value pairs.
Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
}
@@ -21878,8 +27484,8 @@ type ReplicationRuleFilter struct {
// in an And tag.
And *ReplicationRuleAndOperator `type:"structure"`
- // An object keyname prefix that identifies the subset of objects to which the
- // rule applies.
+ // An object key name prefix that identifies the subset of objects to which
+ // the rule applies.
Prefix *string `type:"string"`
// A container for specifying a tag key and value.
@@ -21936,6 +27542,91 @@ func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
return s
}
+// A container specifying S3 Replication Time Control (S3 RTC) related information,
+// including whether S3 RTC is enabled and the time when all objects and operations
+// on objects must be replicated. Must be specified together with a Metrics
+// block.
+type ReplicationTime struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the replication time is enabled.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"`
+
+ // A container specifying the time by which replication should be complete for
+ // all objects and operations on objects.
+ //
+ // Time is a required field
+ Time *ReplicationTimeValue `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationTime) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationTime) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationTime) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.Time == nil {
+ invalidParams.Add(request.NewErrParamRequired("Time"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationTime) SetStatus(v string) *ReplicationTime {
+ s.Status = &v
+ return s
+}
+
+// SetTime sets the Time field's value.
+func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime {
+ s.Time = v
+ return s
+}
+
+// A container specifying the time value for S3 Replication Time Control (S3
+// RTC) and replication metrics EventThreshold.
+type ReplicationTimeValue struct {
+ _ struct{} `type:"structure"`
+
+ // Contains an integer specifying time in minutes.
+ //
+ // Valid values: 15 minutes.
+ Minutes *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ReplicationTimeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationTimeValue) GoString() string {
+ return s.String()
+}
+
+// SetMinutes sets the Minutes field's value.
+func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue {
+ s.Minutes = &v
+ return s
+}
+
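
A sketch of the new S3 Replication Time Control types: per the ReplicationTime documentation above it must be paired with a Metrics block, which is assumed here to take the same 15-minute ReplicationTimeValue as its EventThreshold; the destination bucket ARN is hypothetical.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// rtcDestination builds a replication destination with S3 RTC enabled and the
// matching replication metrics event threshold.
func rtcDestination() *s3.Destination {
	return &s3.Destination{
		Bucket: aws.String("arn:aws:s3:::my-replica-bucket"),
		ReplicationTime: &s3.ReplicationTime{
			Status: aws.String(s3.ReplicationTimeStatusEnabled),
			Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
		Metrics: &s3.Metrics{
			Status:         aws.String(s3.MetricsStatusEnabled),
			EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
	}
}
```
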
+// Container for Payer.
type RequestPaymentConfiguration struct {
_ struct{} `type:"structure"`
@@ -21974,6 +27665,7 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur
return s
}
+// Container for specifying if periodic QueryProgress messages should be sent.
type RequestProgress struct {
_ struct{} `type:"structure"`
@@ -22001,21 +27693,34 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
type RestoreObjectInput struct {
_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
+ // The bucket name containing the object to restore.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+ // Object key for which the operation was initiated.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Container for restore job parameters.
RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+ // VersionId used to reference a specific version of the object.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -22093,6 +27798,20 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
return s
}
+func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *RestoreObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
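
A sketch of the S3 Glacier restore parameters documented around RestoreRequest below (Days, GlacierJobParameters, Tier); svc is an already-configured *s3.S3 client and the object names are hypothetical.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// restoreArchivedObject requests a two-day, Standard-tier restore of an object
// that has been archived to S3 Glacier.
func restoreArchivedObject(svc *s3.S3, bucket, key string) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	return err
}
```
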
type RestoreObjectOutput struct {
_ struct{} `type:"structure"`
@@ -22138,7 +27857,7 @@ type RestoreRequest struct {
// The optional description for the job.
Description *string `type:"string"`
- // Glacier related parameters pertaining to this job. Do not use with restores
+ // S3 Glacier related parameters pertaining to this job. Do not use with restores
// that specify OutputLocation.
GlacierJobParameters *GlacierJobParameters `type:"structure"`
@@ -22148,7 +27867,7 @@ type RestoreRequest struct {
// Describes the parameters for Select job types.
SelectParameters *SelectParameters `type:"structure"`
- // Glacier retrieval tier at which the restore will be processed.
+ // S3 Glacier retrieval tier at which the restore will be processed.
Tier *string `type:"string" enum:"Tier"`
// Type of restore request.
@@ -22232,7 +27951,10 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest {
return s
}
-// Specifies the redirect behavior and when a redirect is applied.
+// Specifies the redirect behavior and when a redirect is applied. For more
+// information about routing rules, see Configuring advanced conditional redirects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects)
+// in the Amazon Simple Storage Service Developer Guide.
type RoutingRule struct {
_ struct{} `type:"structure"`
@@ -22286,8 +28008,9 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
}
// Specifies lifecycle rules for an Amazon S3 bucket. For more information,
-// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html)
-// in the Amazon Simple Storage Service API Reference.
+// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html)
+// in the Amazon Simple Storage Service API Reference. For examples, see Put
+// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples)
type Rule struct {
_ struct{} `type:"structure"`
@@ -22298,6 +28021,7 @@ type Rule struct {
// in the Amazon Simple Storage Service Developer Guide.
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+ // Specifies the expiration for the lifecycle of the object.
Expiration *LifecycleExpiration `type:"structure"`
// Unique identifier for the rule. The value can't be longer than 255 characters.
@@ -22331,7 +28055,10 @@ type Rule struct {
// Status is a required field
Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
- // Specifies when an object transitions to a specified storage class.
+ // Specifies when an object transitions to a specified storage class. For more
+ // information about Amazon S3 lifecycle configuration rules, see Transitioning
+ // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
+ // in the Amazon Simple Storage Service Developer Guide.
Transition *Transition `type:"structure"`
}
@@ -22409,12 +28136,12 @@ func (s *Rule) SetTransition(v *Transition) *Rule {
return s
}
-// Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
+// Specifies the use of SSE-KMS to encrypt delivered inventory reports.
type SSEKMS struct {
_ struct{} `locationName:"SSE-KMS" type:"structure"`
- // Specifies the ID of the AWS Key Management Service (KMS) master encryption
- // key to use for encrypting Inventory reports.
+ // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer
+ // managed customer master key (CMK) to use for encrypting inventory reports.
//
// KeyId is a required field
KeyId *string `type:"string" required:"true" sensitive:"true"`
@@ -22449,7 +28176,7 @@ func (s *SSEKMS) SetKeyId(v string) *SSEKMS {
return s
}
-// Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
+// Specifies the use of SSE-S3 to encrypt delivered inventory reports.
type SSES3 struct {
_ struct{} `locationName:"SSE-S3" type:"structure"`
}
@@ -22464,75 +28191,51 @@ func (s SSES3) GoString() string {
return s.String()
}
-// SelectObjectContentEventStream provides handling of EventStreams for
-// the SelectObjectContent API.
-//
-// Use this type to receive SelectObjectContentEventStream events. The events
-// can be read from the Events channel member.
-//
-// The events that can be received are:
-//
-// * ContinuationEvent
-// * EndEvent
-// * ProgressEvent
-// * RecordsEvent
-// * StatsEvent
-type SelectObjectContentEventStream struct {
- // Reader is the EventStream reader for the SelectObjectContentEventStream
- // events. This value is automatically set by the SDK when the API call is made
- // Use this member when unit testing your code with the SDK to mock out the
- // EventStream Reader.
- //
- // Must not be nil.
- Reader SelectObjectContentEventStreamReader
+// Specifies the byte range of the object to get the records from. A record
+// is processed when its first byte is contained by the range. This parameter
+// is optional, but when specified, it must not be empty. See RFC 2616, Section
+// 14.35.1 about how to specify the start and end of the range.
+type ScanRange struct {
+ _ struct{} `type:"structure"`
- // StreamCloser is the io.Closer for the EventStream connection. For HTTP
- // EventStream this is the response Body. The stream will be closed when
- // the Close method of the EventStream is called.
- StreamCloser io.Closer
+ // Specifies the end of the byte range. This parameter is optional. Valid values:
+ // non-negative integers. The default value is one less than the size of the
+ // object being queried. If only the End parameter is supplied, it is interpreted
+ // to mean scan the last N bytes of the file. For example, End=50
+ // means scan the last 50 bytes.
+ End *int64 `type:"long"`
+
+ // Specifies the start of the byte range. This parameter is optional. Valid
+ // values: non-negative integers. The default value is 0. If only start is supplied,
+ // it means scan from that point to the end of the file.For example; 50
+ // means scan from byte 50 until the end of the file.
+ Start *int64 `type:"long"`
}
-// Close closes the EventStream. This will also cause the Events channel to be
-// closed. You can use the closing of the Events channel to terminate your
-// application's read from the API's EventStream.
-//
-// Will close the underlying EventStream reader. For EventStream over HTTP
-// connection this will also close the HTTP connection.
-//
-// Close must be called when done using the EventStream API. Not calling Close
-// may result in resource leaks.
-func (es *SelectObjectContentEventStream) Close() (err error) {
- es.Reader.Close()
- return es.Err()
+// String returns the string representation
+func (s ScanRange) String() string {
+ return awsutil.Prettify(s)
}
-// Err returns any error that occurred while reading EventStream Events from
-// the service API's response. Returns nil if there were no errors.
-func (es *SelectObjectContentEventStream) Err() error {
- if err := es.Reader.Err(); err != nil {
- return err
- }
- es.StreamCloser.Close()
+// GoString returns the string representation
+func (s ScanRange) GoString() string {
+ return s.String()
+}
- return nil
+// SetEnd sets the End field's value.
+func (s *ScanRange) SetEnd(v int64) *ScanRange {
+ s.End = &v
+ return s
}
-// Events returns a channel to read EventStream Events from the
-// SelectObjectContent API.
-//
-// These events are:
-//
-// * ContinuationEvent
-// * EndEvent
-// * ProgressEvent
-// * RecordsEvent
-// * StatsEvent
-func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
- return es.Reader.Events()
+// SetStart sets the Start field's value.
+func (s *ScanRange) SetStart(v int64) *ScanRange {
+ s.Start = &v
+ return s
}
// SelectObjectContentEventStreamEvent groups together all EventStream
-// events read from the SelectObjectContent API.
+// events written for SelectObjectContentEventStream.
//
// These events are:
//
@@ -22543,11 +28246,12 @@ func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEve
// * StatsEvent
type SelectObjectContentEventStreamEvent interface {
eventSelectObjectContentEventStream()
+ eventstreamapi.Marshaler
+ eventstreamapi.Unmarshaler
}
-// SelectObjectContentEventStreamReader provides the interface for reading EventStream
-// Events from the SelectObjectContent API. The
-// default implementation for this interface will be SelectObjectContentEventStream.
+// SelectObjectContentEventStreamReader provides the interface for reading events from the stream. The
+// default implementation for this interface will be SelectObjectContentEventStreamData.
//
// The reader's Close method must allow multiple concurrent calls.
//
@@ -22558,12 +28262,12 @@ type SelectObjectContentEventStreamEvent interface {
// * ProgressEvent
// * RecordsEvent
// * StatsEvent
+// * SelectObjectContentEventStreamUnknownEvent
type SelectObjectContentEventStreamReader interface {
// Returns a channel of events as they are read from the event stream.
Events() <-chan SelectObjectContentEventStreamEvent
- // Close will close the underlying event stream reader. For event stream over
- // HTTP this will also close the HTTP connection.
+ // Close will stop the reader reading events from the stream.
Close() error
// Returns any error that has occurred while reading from the event stream.
@@ -22573,57 +28277,44 @@ type SelectObjectContentEventStreamReader interface {
type readSelectObjectContentEventStream struct {
eventReader *eventstreamapi.EventReader
stream chan SelectObjectContentEventStreamEvent
- errVal atomic.Value
+ err *eventstreamapi.OnceError
done chan struct{}
closeOnce sync.Once
}
-func newReadSelectObjectContentEventStream(
- reader io.ReadCloser,
- unmarshalers request.HandlerList,
- logger aws.Logger,
- logLevel aws.LogLevelType,
-) *readSelectObjectContentEventStream {
+func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream {
r := &readSelectObjectContentEventStream{
- stream: make(chan SelectObjectContentEventStreamEvent),
- done: make(chan struct{}),
+ eventReader: eventReader,
+ stream: make(chan SelectObjectContentEventStreamEvent),
+ done: make(chan struct{}),
+ err: eventstreamapi.NewOnceError(),
}
-
- r.eventReader = eventstreamapi.NewEventReader(
- reader,
- protocol.HandlerPayloadUnmarshal{
- Unmarshalers: unmarshalers,
- },
- r.unmarshalerForEventType,
- )
- r.eventReader.UseLogger(logger, logLevel)
+ go r.readEventStream()
return r
}
-// Close will close the underlying event stream reader. For EventStream over
-// HTTP this will also close the HTTP connection.
+// Close will close the underlying event stream reader.
func (r *readSelectObjectContentEventStream) Close() error {
r.closeOnce.Do(r.safeClose)
-
return r.Err()
}
+func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} {
+ return r.err.ErrorSet()
+}
+
+func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} {
+ return r.done
+}
+
func (r *readSelectObjectContentEventStream) safeClose() {
close(r.done)
- err := r.eventReader.Close()
- if err != nil {
- r.errVal.Store(err)
- }
}
func (r *readSelectObjectContentEventStream) Err() error {
- if v := r.errVal.Load(); v != nil {
- return v.(error)
- }
-
- return nil
+ return r.err.Err()
}
func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
@@ -22631,6 +28322,7 @@ func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContent
}
func (r *readSelectObjectContentEventStream) readEventStream() {
+ defer r.Close()
defer close(r.stream)
for {
@@ -22645,7 +28337,10 @@ func (r *readSelectObjectContentEventStream) readEventStream() {
return
default:
}
- r.errVal.Store(err)
+ if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok {
+ continue
+ }
+ r.err.SetError(err)
return
}
@@ -22657,33 +28352,56 @@ func (r *readSelectObjectContentEventStream) readEventStream() {
}
}
-func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
- eventType string,
-) (eventstreamapi.Unmarshaler, error) {
+type unmarshalerForSelectObjectContentEventStreamEvent struct {
+ metadata protocol.ResponseMetadata
+}
+
+func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
switch eventType {
case "Cont":
return &ContinuationEvent{}, nil
-
case "End":
return &EndEvent{}, nil
-
case "Progress":
return &ProgressEvent{}, nil
-
case "Records":
return &RecordsEvent{}, nil
-
case "Stats":
return &StatsEvent{}, nil
default:
- return nil, awserr.New(
- request.ErrCodeSerialization,
- fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType),
- nil,
- )
+ return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil
}
}
+// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the
+// SelectObjectContentEventStream group of events when an unknown event is received.
+type SelectObjectContentEventStreamUnknownEvent struct {
+ Type string
+ Message eventstream.Message
+}
+
+// The SelectObjectContentEventStreamUnknownEvent is an event in the SelectObjectContentEventStream
+// group of events.
+func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) (
+ msg eventstream.Message, err error,
+) {
+ return e.Message.Clone(), nil
+}
+
+// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamUnknownEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ e.Message = msg.Clone()
+ return nil
+}
+
// Request to filter the contents of an Amazon S3 object based on a simple Structured
// Query Language (SQL) statement. In the request, along with the SQL expression,
// you must specify a data serialization format (JSON or CSV) of the object.
@@ -22704,7 +28422,7 @@ type SelectObjectContentInput struct {
// Expression is a required field
Expression *string `type:"string" required:"true"`
- // The type of the provided expression (for example., SQL).
+ // The type of the provided expression (for example, SQL).
//
// ExpressionType is a required field
ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
@@ -22738,6 +28456,24 @@ type SelectObjectContentInput struct {
// The SSE Customer Key MD5. For more information, see Server-Side Encryption
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the byte range of the object to get the records from. A record
+ // is processed when its first byte is contained by the range. This parameter
+ // is optional, but when specified, it must not be empty. See RFC 2616, Section
+ // 14.35.1 about how to specify the start and end of the range.
+ //
+ // ScanRange may be used in the following ways:
+ //
+ // * <scanrange><start>50</start><end>100</end></scanrange> - process only
+ // the records starting between the bytes 50 and 100 (inclusive, counting
+ // from zero)
+ //
+ // * <scanrange><start>50</start></scanrange> - process only the records
+ // starting after the byte 50
+ //
+ // * <scanrange><end>50</end></scanrange> - process only the records within
+ // the last 50 bytes of the file.
+ ScanRange *ScanRange `type:"structure"`
}
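A small, hedged illustration of the first ScanRange pattern above; `withScanRange` and its package name are hypothetical, and the values are placeholders.

```go
package s3util

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// withScanRange limits a Select request to the byte window [start, end],
// using the ScanRange member and setter introduced above.
func withScanRange(in *s3.SelectObjectContentInput, start, end int64) *s3.SelectObjectContentInput {
	return in.SetScanRange(&s3.ScanRange{
		Start: aws.Int64(start), // first byte considered, counting from zero
		End:   aws.Int64(end),   // last byte of the window (inclusive)
	})
}
```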
// String returns the string representation
@@ -22858,11 +28594,30 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC
return s
}
+// SetScanRange sets the ScanRange field's value.
+func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput {
+ s.ScanRange = v
+ return s
+}
+
+func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *SelectObjectContentInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type SelectObjectContentOutput struct {
_ struct{} `type:"structure" payload:"Payload"`
- // Use EventStream to use the API's stream.
- EventStream *SelectObjectContentEventStream `type:"structure"`
+ EventStream *SelectObjectContentEventStream
}
// String returns the string representation
@@ -22875,29 +28630,17 @@ func (s SelectObjectContentOutput) GoString() string {
return s.String()
}
-// SetEventStream sets the EventStream field's value.
func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput {
s.EventStream = v
return s
}
+func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream {
+ return s.EventStream
+}
-func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) {
- if r.Error != nil {
- return
- }
- reader := newReadSelectObjectContentEventStream(
- r.HTTPResponse.Body,
- r.Handlers.UnmarshalStream,
- r.Config.Logger,
- r.Config.LogLevel.Value(),
- )
- go reader.readEventStream()
-
- eventStream := &SelectObjectContentEventStream{
- StreamCloser: r.HTTPResponse.Body,
- Reader: reader,
- }
- s.EventStream = eventStream
+// GetStream returns the type to interact with the event stream.
+func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream {
+ return s.EventStream
}
// Describes the parameters for Select job types.
@@ -22909,7 +28652,7 @@ type SelectParameters struct {
// Expression is a required field
Expression *string `type:"string" required:"true"`
- // The type of the provided expression (e.g., SQL).
+ // The type of the provided expression (for example, SQL).
//
// ExpressionType is a required field
ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
@@ -22989,8 +28732,24 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec
type ServerSideEncryptionByDefault struct {
_ struct{} `type:"structure"`
- // KMS master key ID to use for the default encryption. This parameter is allowed
- // if and only if SSEAlgorithm is set to aws:kms.
+ // AWS Key Management Service (KMS) customer master key ID to use for the default
+ // encryption. This parameter is allowed if and only if SSEAlgorithm is set
+ // to aws:kms.
+ //
+ // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK.
+ // However, if you are using encryption with cross-account operations, you must
+ // use a fully qualified CMK ARN. For more information, see Using encryption
+ // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy).
+ //
+ // For example:
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more
+ // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the AWS Key Management Service Developer Guide.
KMSMasterKeyID *string `type:"string" sensitive:"true"`
// Server-side encryption algorithm to use for the default encryption.
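A hedged sketch of how the expanded KMSMasterKeyID guidance above is applied: default bucket encryption set to SSE-KMS with a fully qualified key ARN. The bucket name is a placeholder; the key ARN is the documentation's own example.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAwsKms),
					// Fully qualified CMK ARN, as recommended for cross-account use.
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```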
@@ -23129,7 +28888,7 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv
// objects that you want to replicate. You can choose to enable or disable the
// replication of these objects. Currently, Amazon S3 supports only the filter
// that you can specify for objects created with server-side encryption using
-// an AWS KMS-Managed Key (SSE-KMS).
+// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).
type SourceSelectionCriteria struct {
_ struct{} `type:"structure"`
@@ -23176,7 +28935,7 @@ type SseKmsEncryptedObjects struct {
_ struct{} `type:"structure"`
// Specifies whether Amazon S3 replicates objects created with server-side encryption
- // using an AWS KMS-managed key.
+ // using a customer master key (CMK) stored in AWS Key Management Service.
//
// Status is a required field
Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"`
@@ -23211,6 +28970,7 @@ func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects {
return s
}
+// Container for the stats details.
type Stats struct {
_ struct{} `type:"structure"`
@@ -23252,6 +29012,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats {
return s
}
+// Container for the Stats Event.
type StatsEvent struct {
_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
@@ -23292,6 +29053,18 @@ func (s *StatsEvent) UnmarshalEvent(
return nil
}
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
// Specifies data related to access patterns to be collected and made available
// to analyze the tradeoffs between different storage classes for an Amazon
// S3 bucket.
@@ -23334,6 +29107,8 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport)
return s
}
+// Container for data related to the storage class analysis for an Amazon S3
+// bucket for export.
type StorageClassAnalysisDataExport struct {
_ struct{} `type:"structure"`
@@ -23391,10 +29166,11 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora
return s
}
+// A container of a key value name pair.
type Tag struct {
_ struct{} `type:"structure"`
- // Name of the tag.
+ // Name of the object key.
//
// Key is a required field
Key *string `min:"1" type:"string" required:"true"`
@@ -23446,9 +29222,12 @@ func (s *Tag) SetValue(v string) *Tag {
return s
}
+// Container for TagSet elements.
type Tagging struct {
_ struct{} `type:"structure"`
+ // A collection for a set of tags
+ //
// TagSet is a required field
TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
}
@@ -23492,9 +29271,11 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
return s
}
+// Container for granting information.
type TargetGrant struct {
_ struct{} `type:"structure"`
+ // Container for the person being granted permissions.
Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
// Logging permissions assigned to the Grantee for the bucket.
@@ -23617,6 +29398,11 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
return s
}
+// A container for specifying the configuration for publication of messages
+// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3
+// detects specified events. This data type is deprecated. Use TopicConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html)
+// instead.
type TopicConfigurationDeprecated struct {
_ struct{} `type:"structure"`
@@ -23625,6 +29411,7 @@ type TopicConfigurationDeprecated struct {
// Deprecated: Event has been deprecated
Event *string `deprecated:"true" type:"string" enum:"Event"`
+ // A collection of events related to objects
Events []*string `locationName:"Event" type:"list" flattened:"true"`
// An optional unique identifier for configurations in a notification configuration.
@@ -23670,7 +29457,10 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep
return s
}
-// Specifies when an object transitions to a specified storage class.
+// Specifies when an object transitions to a specified storage class. For more
+// information about Amazon S3 lifecycle configuration rules, see Transitioning
+// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
+// in the Amazon Simple Storage Service Developer Guide.
type Transition struct {
_ struct{} `type:"structure"`
@@ -23717,6 +29507,8 @@ func (s *Transition) SetStorageClass(v string) *Transition {
type UploadPartCopyInput struct {
_ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`
+ // The bucket name.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -23742,11 +29534,12 @@ type UploadPartCopyInput struct {
// The range of bytes to copy from the source object. The range value must use
// the form bytes=first-last, where the first and last are the zero-based byte
// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
- // first ten bytes of the source. You can copy a range only if the source object
+ // first 10 bytes of the source. You can copy a range only if the source object
// is greater than 5 MB.
CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
- // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
@@ -23755,10 +29548,12 @@ type UploadPartCopyInput struct {
CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+ // Object key for which the multipart upload was initiated.
+ //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -23768,26 +29563,28 @@ type UploadPartCopyInput struct {
// PartNumber is a required field
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header. This must be the same encryption key specified in the initiate multipart
// upload request.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Upload ID identifying the multipart upload whose part is being copied.
@@ -23960,9 +29757,24 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
return s
}
+func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *UploadPartCopyInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
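A minimal sketch of the CopySourceRange form documented above (`bytes=first-last`, zero-based, inclusive); all names and the upload ID are placeholders for an already-started multipart upload.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("dest-bucket"),           // placeholder
		Key:             aws.String("large-object"),          // placeholder
		UploadId:        aws.String("example-upload-id"),     // placeholder
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("src-bucket/src-object"), // placeholder
		CopySourceRange: aws.String("bytes=0-10485759"),      // first 10 MiB of the source
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("copied part ETag:", aws.StringValue(out.CopyPartResult.ETag))
}
```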
type UploadPartCopyOutput struct {
_ struct{} `type:"structure" payload:"CopyPartResult"`
+ // Container for all response elements.
CopyPartResult *CopyPartResult `type:"structure"`
// The version of the source object that was copied, if you have enabled versioning
@@ -23979,16 +29791,17 @@ type UploadPartCopyOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
}
@@ -24060,7 +29873,7 @@ type UploadPartInput struct {
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
- // auto-populated when using the command from the CLI. This parameted is required
+ // auto-populated when using the command from the CLI. This parameter is required
// if object lock parameters are specified.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
@@ -24075,26 +29888,28 @@ type UploadPartInput struct {
// PartNumber is a required field
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header. This must be the same encryption key specified in the initiate multipart
// upload request.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Upload ID identifying the multipart upload whose part is being uploaded.
@@ -24221,6 +30036,20 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
return s
}
+func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *UploadPartInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
type UploadPartOutput struct {
_ struct{} `type:"structure"`
@@ -24237,16 +30066,16 @@ type UploadPartOutput struct {
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
+ // the response will include this header to provide round-trip message integrity
// verification of the customer-provided encryption key.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
}
@@ -24561,11 +30390,37 @@ const (
// EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value
EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+ // EventS3ObjectRestore is a Event enum value
+ EventS3ObjectRestore = "s3:ObjectRestore:*"
+
// EventS3ObjectRestorePost is a Event enum value
EventS3ObjectRestorePost = "s3:ObjectRestore:Post"
// EventS3ObjectRestoreCompleted is a Event enum value
EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed"
+
+ // EventS3Replication is a Event enum value
+ EventS3Replication = "s3:Replication:*"
+
+ // EventS3ReplicationOperationFailedReplication is a Event enum value
+ EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication"
+
+ // EventS3ReplicationOperationNotTracked is a Event enum value
+ EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked"
+
+ // EventS3ReplicationOperationMissedThreshold is a Event enum value
+ EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold"
+
+ // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value
+ EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold"
+)
+
+const (
+ // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value
+ ExistingObjectReplicationStatusEnabled = "Enabled"
+
+ // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value
+ ExistingObjectReplicationStatusDisabled = "Disabled"
)
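A hedged sketch using one of the replication events added above to route failure notifications to an SNS topic; the bucket, topic ARN, and configuration ID are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			TopicConfigurations: []*s3.TopicConfiguration{{
				Id:       aws.String("replication-alerts"),                                    // placeholder
				TopicArn: aws.String("arn:aws:sns:us-west-2:111122223333:replication-alerts"), // placeholder
				Events:   aws.StringSlice([]string{s3.EventS3ReplicationOperationFailedReplication}),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```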
const (
@@ -24657,6 +30512,9 @@ const (
// InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value
InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus"
+
+ // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value
+ InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier"
)
const (
@@ -24691,6 +30549,14 @@ const (
MetadataDirectiveReplace = "REPLACE"
)
+const (
+ // MetricsStatusEnabled is a MetricsStatus enum value
+ MetricsStatusEnabled = "Enabled"
+
+ // MetricsStatusDisabled is a MetricsStatus enum value
+ MetricsStatusDisabled = "Disabled"
+)
+
const (
// ObjectCannedACLPrivate is a ObjectCannedACL enum value
ObjectCannedACLPrivate = "private"
@@ -24839,6 +30705,14 @@ const (
ReplicationStatusReplica = "REPLICA"
)
+const (
+ // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value
+ ReplicationTimeStatusEnabled = "Enabled"
+
+ // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value
+ ReplicationTimeStatusDisabled = "Disabled"
+)
+
// If present, indicates that the requester was successfully charged for the
// request.
const (
@@ -24846,10 +30720,11 @@ const (
RequestChargedRequester = "requester"
)
-// Confirms that the requester knows that she or he will be charged for the
-// request. Bucket owners need not specify this parameter in their requests.
-// Documentation on downloading objects from requester pays buckets can be found
-// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+// Confirms that the requester knows that they will be charged for the request.
+// Bucket owners need not specify this parameter in their requests. For information
+// about downloading objects from requester pays buckets, see Downloading Objects
+// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+// in the Amazon S3 Developer Guide.
const (
// RequestPayerRequester is a RequestPayer enum value
RequestPayerRequester = "requester"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
index 5c8ce5cc..407f06b6 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -13,7 +13,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/sdkio"
)
const (
@@ -25,30 +24,6 @@ const (
appendMD5TxEncoding = "append-md5"
)
-// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
-// require it.
-func contentMD5(r *request.Request) {
- h := md5.New()
-
- if !aws.IsReaderSeekable(r.Body) {
- if r.Config.Logger != nil {
- r.Config.Logger.Log(fmt.Sprintf(
- "Unable to compute Content-MD5 for unseekable body, S3.%s",
- r.Operation.Name))
- }
- return
- }
-
- if _, err := copySeekableBody(h, r.Body); err != nil {
- r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
- return
- }
-
- // encode the md5 checksum in base64 and set the request header.
- v := base64.StdEncoding.EncodeToString(h.Sum(nil))
- r.HTTPRequest.Header.Set(contentMD5Header, v)
-}
-
// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
// request. If the body is not seekable or S3DisableContentMD5Validation set
// this handler will be ignored.
@@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) {
dst = io.MultiWriter(hashers...)
}
- if _, err := copySeekableBody(dst, r.Body); err != nil {
+ if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
return
}
@@ -119,28 +94,6 @@ const (
sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
)
-func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
- curPos, err := src.Seek(0, sdkio.SeekCurrent)
- if err != nil {
- return 0, err
- }
-
- // hash the body. seek back to the first position after reading to reset
- // the body for transmission. copy errors may be assumed to be from the
- // body.
- n, err := io.Copy(dst, src)
- if err != nil {
- return n, err
- }
-
- _, err = src.Seek(curPos, sdkio.SeekStart)
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
-
// Adds the x-amz-te: append_md5 header to the request. This requests the service
// responds with a trailing MD5 checksum.
//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
index 23d386b1..a7698d5e 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -4,6 +4,7 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
+ "github.com/aws/aws-sdk-go/service/s3/internal/arn"
)
func init() {
@@ -13,7 +14,7 @@ func init() {
func defaultInitClientFn(c *client.Client) {
// Support building custom endpoints based on config
- c.Handlers.Build.PushFront(updateEndpointForS3Config)
+ c.Handlers.Build.PushFront(endpointHandler)
// Require SSL when using SSE keys
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
@@ -27,17 +28,11 @@ func defaultInitClientFn(c *client.Client) {
}
func defaultInitRequestFn(r *request.Request) {
- // Add reuest handlers for specific platforms.
+ // Add request handlers for specific platforms.
// e.g. 100-continue support for PUT requests using Go 1.6
platformRequestHandlers(r)
switch r.Operation.Name {
- case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
- opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
- opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
- opPutBucketReplication:
- // These S3 operations require Content-MD5 to be set
- r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
@@ -73,3 +68,8 @@ type sseCustomerKeyGetter interface {
type copySourceSSECustomerKeyGetter interface {
getCopySourceSSECustomerKey() string
}
+
+type endpointARNGetter interface {
+ getEndpointARN() (arn.Resource, error)
+ hasEndpointARN() bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
index 4b65f715..7f7aca20 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -104,19 +104,6 @@
// content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created.
//
-// sess := session.Must(session.NewSession())
-//
-// // Create the decryption client.
-// svc := s3crypto.NewDecryptionClient(sess)
-//
-// // The object will be downloaded from S3 and decrypted locally. By metadata
-// // about the object's encryption will instruct the decryption client how
-// // decrypt the content of the object. By default KMS is used for keys.
-// result, err := svc.GetObject(&s3.GetObjectInput {
-// Bucket: aws.String(myBucket),
-// Key: aws.String(myKey),
-// })
-//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
new file mode 100644
index 00000000..c4048fbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
@@ -0,0 +1,233 @@
+package s3
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ awsarn "github.com/aws/aws-sdk-go/aws/arn"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/service/s3/internal/arn"
+)
+
+// Used by shapes with members decorated as endpoint ARN.
+func parseEndpointARN(v string) (arn.Resource, error) {
+ return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+ resParts := arn.SplitResource(a.Resource)
+ switch resParts[0] {
+ case "accesspoint":
+ return arn.ParseAccessPointResource(a, resParts[1:])
+ default:
+ return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+ }
+}
+
+func endpointHandler(req *request.Request) {
+ endpoint, ok := req.Params.(endpointARNGetter)
+ if !ok || !endpoint.hasEndpointARN() {
+ updateBucketEndpointFromParams(req)
+ return
+ }
+
+ resource, err := endpoint.getEndpointARN()
+ if err != nil {
+ req.Error = newInvalidARNError(nil, err)
+ return
+ }
+
+ resReq := resourceRequest{
+ Resource: resource,
+ Request: req,
+ }
+
+ if resReq.IsCrossPartition() {
+ req.Error = newClientPartitionMismatchError(resource,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ return
+ }
+
+ if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
+ req.Error = newClientRegionMismatchError(resource,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ return
+ }
+
+ if resReq.HasCustomEndpoint() {
+ req.Error = newInvalidARNWithCustomEndpointError(resource, nil)
+ return
+ }
+
+ switch tv := resource.(type) {
+ case arn.AccessPointARN:
+ err = updateRequestAccessPointEndpoint(req, tv)
+ if err != nil {
+ req.Error = err
+ }
+ default:
+ req.Error = newInvalidARNError(resource, nil)
+ }
+}
+
+type resourceRequest struct {
+ Resource arn.Resource
+ Request *request.Request
+}
+
+func (r resourceRequest) ARN() awsarn.ARN {
+ return r.Resource.GetARN()
+}
+
+func (r resourceRequest) AllowCrossRegion() bool {
+ return aws.BoolValue(r.Request.Config.S3UseARNRegion)
+}
+
+func (r resourceRequest) UseFIPS() bool {
+ return isFIPS(aws.StringValue(r.Request.Config.Region))
+}
+
+func (r resourceRequest) IsCrossPartition() bool {
+ return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+func (r resourceRequest) IsCrossRegion() bool {
+ return isCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+func (r resourceRequest) HasCustomEndpoint() bool {
+ return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+func isFIPS(clientRegion string) bool {
+ return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
+}
+func isCrossRegion(req *request.Request, otherRegion string) bool {
+ return req.ClientInfo.SigningRegion != otherRegion
+}
+
+func updateBucketEndpointFromParams(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore operation requests if the bucket name was not provided
+ // if this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+ updateEndpointForS3Config(r, bucket)
+}
+
+func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
+ // Accelerate not supported
+ if aws.BoolValue(req.Config.S3UseAccelerate) {
+ return newClientConfiguredForAccelerateError(accessPoint,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ }
+
+ // Ignore the disable host prefix for access points since custom endpoints
+ // are not supported.
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+ if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil {
+ return err
+ }
+
+ removeBucketFromPath(req.HTTPRequest.URL)
+
+ return nil
+}
+
+func removeBucketFromPath(u *url.URL) {
+ u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+}
+
+type accessPointEndpointBuilder arn.AccessPointARN
+
+const (
+ accessPointPrefixLabel = "accesspoint"
+ accountIDPrefixLabel = "accountID"
+ accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
+)
+
+func (a accessPointEndpointBuilder) Build(req *request.Request) error {
+ resolveRegion := arn.AccessPointARN(a).Region
+ cfgRegion := aws.StringValue(req.Config.Region)
+
+ if isFIPS(cfgRegion) {
+ if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) {
+ // FIPS with cross region is not supported, the SDK must fail
+ // because there is no well defined method for SDK to construct a
+ // correct FIPS endpoint.
+ return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
+ req.ClientInfo.PartitionID, cfgRegion, nil)
+ }
+ resolveRegion = cfgRegion
+ }
+
+ endpoint, err := resolveRegionalEndpoint(req, resolveRegion)
+ if err != nil {
+ return newFailedToResolveEndpointError(arn.AccessPointARN(a),
+ req.ClientInfo.PartitionID, cfgRegion, err)
+ }
+
+ if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+ return err
+ }
+
+ const serviceEndpointLabel = "s3-accesspoint"
+
+ // dualstack provided by endpoint resolver
+ cfgHost := req.HTTPRequest.URL.Host
+ if strings.HasPrefix(cfgHost, "s3") {
+ req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
+ }
+
+ protocol.HostPrefixBuilder{
+ Prefix: accesPointPrefixTemplate,
+ LabelsFn: a.hostPrefixLabelValues,
+ }.Build(req)
+
+ req.ClientInfo.SigningName = endpoint.SigningName
+ req.ClientInfo.SigningRegion = endpoint.SigningRegion
+
+ err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+ if err != nil {
+ return newInvalidARNError(arn.AccessPointARN(a), err)
+ }
+
+ return nil
+}
+
+func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+ return map[string]string{
+ accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
+ accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
+ }
+}
+
+func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) {
+ return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) {
+ opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
+ opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
+ opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
+ })
+}
+
+func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
+ endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))
+
+ r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization,
+ "failed to parse endpoint URL", err)
+ }
+
+ return nil
+}
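What the new endpoint handler enables from a caller's point of view, as a hedged sketch: an access point ARN passed as the Bucket member, with S3UseARNRegion allowing the ARN's region to differ from the client's. The ARN, key, and account ID are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:         aws.String("us-east-1"),
		S3UseARNRegion: aws.Bool(true), // permit the cross-region ARN below
	}))
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		// Access point ARN in place of a bucket name (placeholder values).
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("my-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	// The handler above rewrites the host to
	// my-access-point-123456789012.s3-accesspoint.us-west-2.amazonaws.com
	// and signs the request for the ARN's region.
}
```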
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go
new file mode 100644
index 00000000..9df03e78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go
@@ -0,0 +1,151 @@
+package s3
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/s3/internal/arn"
+)
+
+const (
+ invalidARNErrorErrCode = "InvalidARNError"
+ configurationErrorErrCode = "ConfigurationError"
+)
+
+type invalidARNError struct {
+ message string
+ resource arn.Resource
+ origErr error
+}
+
+func (e invalidARNError) Error() string {
+ var extra string
+ if e.resource != nil {
+ extra = "ARN: " + e.resource.String()
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+func (e invalidARNError) Code() string {
+ return invalidARNErrorErrCode
+}
+
+func (e invalidARNError) Message() string {
+ return e.message
+}
+
+func (e invalidARNError) OrigErr() error {
+ return e.origErr
+}
+
+func newInvalidARNError(resource arn.Resource, err error) invalidARNError {
+ return invalidARNError{
+ message: "invalid ARN",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError {
+ return invalidARNError{
+ message: "resource ARN not supported with custom client endpoints",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// ARN not supported for the target partition
+func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError {
+ return invalidARNError{
+ message: "resource ARN not supported for the target ARN partition",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+type configurationError struct {
+ message string
+ resource arn.Resource
+ clientPartitionID string
+ clientRegion string
+ origErr error
+}
+
+func (e configurationError) Error() string {
+ extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+ e.resource, e.clientPartitionID, e.clientRegion)
+
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+func (e configurationError) Code() string {
+ return configurationErrorErrCode
+}
+
+func (e configurationError) Message() string {
+ return e.message
+}
+
+func (e configurationError) OrigErr() error {
+ return e.origErr
+}
+
+func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+ return configurationError{
+ message: "client partition does not match provided ARN partition",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+ return configurationError{
+ message: "client region does not match provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+ return configurationError{
+ message: "endpoint resolver failed to find an endpoint for the provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+ return configurationError{
+ message: "client configured for fips but cross-region resource ARN provided",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+ return configurationError{
+ message: "client configured for S3 Accelerate but is supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+ return configurationError{
+ message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
index 931cb17b..49aeff16 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -13,6 +13,12 @@ const (
// ErrCodeBucketAlreadyOwnedByYou for service response error code
// "BucketAlreadyOwnedByYou".
+ //
+ // The bucket you tried to create already exists, and you own it. Amazon S3
+ // returns this error in all AWS Regions except in the North Virginia Region.
+ // For legacy compatibility, if you re-create an existing bucket that you already
+ // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+ // bucket access control lists (ACLs).
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
// ErrCodeNoSuchBucket for service response error code
@@ -36,13 +42,13 @@ const (
// ErrCodeObjectAlreadyInActiveTierError for service response error code
// "ObjectAlreadyInActiveTierError".
//
- // This operation is not allowed against this storage tier
+ // This operation is not allowed against this storage tier.
ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
// ErrCodeObjectNotInActiveTierError for service response error code
// "ObjectNotInActiveTierError".
//
// The source object of the COPY operation is not in the active tier and is
- // only stored in Amazon Glacier.
+ // only stored in Amazon S3 Glacier.
ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
)
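A minimal sketch of acting on the expanded BucketAlreadyOwnedByYou documentation above, treating the re-create-own-bucket case as a no-op; the bucket name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("my-bucket"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyOwnedByYou {
		log.Println("bucket already exists and is owned by this account; continuing")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
```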
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index a7fbc2de..81cdec1a 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{
opListBuckets, opCreateBucket, opDeleteBucket,
}
-// Request handler to automatically add the bucket name to the endpoint domain
+// Automatically add the bucket name to the endpoint domain
// if possible. This style of bucket is valid for all bucket names which are
// DNS compatible and do not contain "."
-func updateEndpointForS3Config(r *request.Request) {
+func updateEndpointForS3Config(r *request.Request, bucketName string) {
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
@@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) {
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
}
}
- updateEndpointForAccelerate(r)
+ updateEndpointForAccelerate(r, bucketName)
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
- updateEndpointForHostStyle(r)
+ updateEndpointForHostStyle(r, bucketName)
}
}
-func updateEndpointForHostStyle(r *request.Request) {
- bucket, ok := bucketNameFromReqParams(r.Params)
- if !ok {
- // Ignore operation requests if the bucketname was not provided
- // if this is an input validation error the validation handler
- // will report it.
- return
- }
-
- if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+func updateEndpointForHostStyle(r *request.Request, bucketName string) {
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
// bucket name must be valid to put into the host
return
}
- moveBucketToHost(r.HTTPRequest.URL, bucket)
+ moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
var (
accelElem = []byte("s3-accelerate.dualstack.")
)
-func updateEndpointForAccelerate(r *request.Request) {
- bucket, ok := bucketNameFromReqParams(r.Params)
- if !ok {
- // Ignore operation requests if the bucketname was not provided
- // if this is an input validation error the validation handler
- // will report it.
- return
- }
-
- if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+func updateEndpointForAccelerate(r *request.Request, bucketName string) {
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
r.Error = awserr.New("InvalidParameterException",
- fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+ fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
nil)
return
}
@@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) {
r.HTTPRequest.URL.Host = strings.Join(parts, ".")
- moveBucketToHost(r.HTTPRequest.URL, bucket)
+ moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
// Attempts to retrieve the bucket name from the request input parameters.
@@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool {
// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
u.Host = bucket + "." + u.Host
- u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
- if u.Path == "" {
- u.Path = "/"
- }
+ removeBucketFromPath(u)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go
new file mode 100644
index 00000000..2f93f96f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go
@@ -0,0 +1,45 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 access point ARN resource.
+type AccessPointARN struct {
+ arn.ARN
+ AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+ return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+ if len(a.Region) == 0 {
+ return AccessPointARN{}, InvalidARNError{a, "region not set"}
+ }
+ if len(a.AccountID) == 0 {
+ return AccessPointARN{}, InvalidARNError{a, "account-id not set"}
+ }
+ if len(resParts) == 0 {
+ return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"}
+ }
+
+ resID := resParts[0]
+ if len(strings.TrimSpace(resID)) == 0 {
+ return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
+ }
+
+ return AccessPointARN{
+ ARN: a,
+ AccessPointName: resID,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go
new file mode 100644
index 00000000..a942d887
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go
@@ -0,0 +1,71 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+ GetARN() arn.ARN
+ String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) {
+ a, err := arn.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(a.Partition) == 0 {
+ return nil, InvalidARNError{a, "partition not set"}
+ }
+ if a.Service != "s3" {
+ return nil, InvalidARNError{a, "service is not S3"}
+ }
+ if len(a.Resource) == 0 {
+ return nil, InvalidARNError{a, "resource not set"}
+ }
+
+ return resParser(a)
+}
+
+// SplitResource splits the resource components by the ARN resource delimiters.
+func SplitResource(v string) []string {
+ var parts []string
+ var offset int
+
+ for offset <= len(v) {
+ idx := strings.IndexAny(v[offset:], "/:")
+ if idx < 0 {
+ parts = append(parts, v[offset:])
+ break
+ }
+ parts = append(parts, v[offset:idx+offset])
+ offset += idx + 1
+ }
+
+ return parts
+}
+
+// IsARN returns whether the given string is an ARN
+func IsARN(s string) bool {
+ return arn.IsARN(s)
+}
+
+// InvalidARNError provides the error for an invalid ARN error.
+type InvalidARNError struct {
+ ARN arn.ARN
+ Reason string
+}
+
+func (e InvalidARNError) Error() string {
+ return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String()
+}
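Because this package is internal to the SDK it cannot be imported directly; the hypothetical splitResource below simply mirrors SplitResource above to show how an ARN's resource component is tokenized on "/" and ":".

```go
package main

import (
	"fmt"
	"strings"
)

// splitResource mirrors the SplitResource logic above, for illustration only.
func splitResource(v string) []string {
	var parts []string
	var offset int
	for offset <= len(v) {
		idx := strings.IndexAny(v[offset:], "/:")
		if idx < 0 {
			parts = append(parts, v[offset:])
			break
		}
		parts = append(parts, v[offset:idx+offset])
		offset += idx + 1
	}
	return parts
}

func main() {
	fmt.Println(splitResource("accesspoint/my-access-point"))
	// [accesspoint my-access-point] -> parsed as an AccessPointARN
	fmt.Println(splitResource("accesspoint:my-access-point:object/key"))
	// [accesspoint my-access-point object key] -> rejected: sub resource not supported
}
```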
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index d17dcc9d..b4c07b4d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -31,7 +31,7 @@ var initRequest func(*request.Request)
const (
ServiceName = "s3" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
- ServiceID = "S3" // ServiceID is a unique identifer of a specific service.
+ ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the S3 client with a session.
@@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
+// mySession := session.Must(session.NewSession())
+//
// // Create a S3 client from just a session.
// svc := s3.New(mySession)
//
@@ -46,11 +48,11 @@ const (
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 {
svc := &S3{
Client: client.New(
cfg,
@@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
+ PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2006-03-01",
},
@@ -75,6 +78,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+ svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler)
svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
// Run custom client initialization if present
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
index f6a69aed..247770e4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -2,6 +2,7 @@ package s3
import (
"bytes"
+ "io"
"io/ioutil"
"net/http"
@@ -24,17 +25,18 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
r.HTTPResponse.Body = ioutil.NopCloser(body)
defer body.Seek(0, sdkio.SeekStart)
- if body.Len() == 0 {
- // If there is no body don't attempt to parse the body.
- return
- }
-
unmarshalError(r)
if err, ok := r.Error.(awserr.Error); ok && err != nil {
- if err.Code() == request.ErrCodeSerialization {
+ if err.Code() == request.ErrCodeSerialization &&
+ err.OrigErr() != io.EOF {
r.Error = nil
return
}
- r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ // if empty payload
+ if err.OrigErr() == io.EOF {
+ r.HTTPResponse.StatusCode = http.StatusInternalServerError
+ } else {
+ r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ }
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
index 5b63fac7..6eecf669 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -1,6 +1,7 @@
package s3
import (
+ "bytes"
"encoding/xml"
"fmt"
"io"
@@ -45,17 +46,24 @@ func unmarshalError(r *request.Request) {
// Attempt to parse error from body if it is known
var errResp xmlErrorResponse
- err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
- if err == io.EOF {
- // Only capture the error if an unmarshal error occurs that is not EOF,
- // because S3 might send an error without a error message which causes
- // the XML unmarshal to fail with EOF.
- err = nil
+ var err error
+ if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
+ err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
+ } else {
+ err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
}
+
if err != nil {
+ var errorMsg string
+ if err == io.EOF {
+ errorMsg = "empty response payload"
+ } else {
+ errorMsg = "failed to unmarshal error message"
+ }
+
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
- "failed to unmarshal error message", err),
+ errorMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
@@ -86,3 +94,21 @@ type RequestFailure interface {
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}
+
+// s3unmarshalXMLError is s3 specific xml error unmarshaler
+// for 200 OK errors and response payloads.
+// This function differs from the xmlUtil.UnmarshalXMLError
+// func. It does not ignore the EOF error and passes it up.
+// Related to bug fix for `s3 200 OK response with empty payload`
+func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := xml.NewDecoder(body).Decode(v)
+ if err != nil && err != io.EOF {
+ return awserr.NewUnmarshalError(err,
+ "failed to unmarshal error message", errBuf.Bytes())
+ }
+
+ return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index eb0a6a41..550b5f68 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -78,6 +78,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
// in the IAM User Guide.
//
+// Session Duration
+//
// By default, the temporary security credentials created by AssumeRole last
// for one hour. However, you can use the optional DurationSeconds parameter
// to specify the duration of your session. You can provide a value from 900
@@ -91,6 +93,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
// in the IAM User Guide.
//
+// Permissions
+//
// The temporary security credentials created by AssumeRole can be used to make
// API calls to any AWS service with the following exception: You cannot call
// the AWS STS GetFederationToken or GetSessionToken API operations.
@@ -99,7 +103,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
// use as managed session policies. The plain text that you use for both inline
-// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// and managed session policies can't exceed 2,048 characters. Passing policies
// to this operation returns new temporary credentials. The resulting session's
// permissions are the intersection of the role's identity-based policy and
// the session policies. You can use the role's temporary credentials in subsequent
@@ -131,6 +135,24 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
// in the IAM User Guide.
//
+// Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see Passing
+// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during
+// role chaining. For more information, see Chaining Roles with Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide.
+//
// Using MFA with AssumeRole
//
// (Optional) You can include multi-factor authentication (MFA) information
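The session-tag documentation added above corresponds to the new Tags and TransitiveTagKeys fields on AssumeRoleInput later in this file. A minimal sketch of an AssumeRole call that passes a tag and marks it transitive; the role ARN and tag values are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.AssumeRole(&sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
            RoleSessionName: aws.String("tagged-session"),
            DurationSeconds: aws.Int64(3600),
            Tags: []*sts.Tag{
                {Key: aws.String("Project"), Value: aws.String("demo")},
            },
            // Transitive tags persist through role chaining.
            TransitiveTagKeys: []*string{aws.String("Project")},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId),
            aws.Int64Value(out.PackedPolicySize))
    }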
@@ -165,9 +187,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the policy document was too large. The error
-// message describes how big the policy document is, in packed form, as a percentage
-// of what the API allows.
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
@@ -256,6 +287,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// an access key ID, a secret access key, and a security token. Applications
// can use these temporary security credentials to sign calls to AWS services.
//
+// Session Duration
+//
// By default, the temporary security credentials created by AssumeRoleWithSAML
// last for one hour. However, you can use the optional DurationSeconds parameter
// to specify the duration of your session. Your role session lasts for the
@@ -271,6 +304,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
// in the IAM User Guide.
//
+// Permissions
+//
// The temporary security credentials created by AssumeRoleWithSAML can be used
// to make API calls to any AWS service with the following exception: you cannot
// call the STS GetFederationToken or GetSessionToken API operations.
@@ -279,7 +314,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
// use as managed session policies. The plain text that you use for both inline
-// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// and managed session policies can't exceed 2,048 characters. Passing policies
// to this operation returns new temporary credentials. The resulting session's
// permissions are the intersection of the role's identity-based policy and
// the session policies. You can use the role's temporary credentials in subsequent
@@ -289,12 +324,6 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
-// Before your application can call AssumeRoleWithSAML, you must configure your
-// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
-// you must use AWS Identity and Access Management (IAM) to create a SAML provider
-// entity in your AWS account that represents your identity provider. You must
-// also create an IAM role that specifies this SAML provider in its trust policy.
-//
// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
// The identity of the caller is validated by using keys in the metadata document
// that is uploaded for the SAML provider entity for your identity provider.
@@ -302,8 +331,50 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
// logs. The entry includes the value in the NameID element of the SAML assertion.
// We recommend that you use a NameIDType that is not associated with any personally
-// identifiable information (PII). For example, you could instead use the Persistent
-// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+// identifiable information (PII). For example, you could instead use the persistent
+// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML assertion
+// as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see Passing Session Tags
+// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plain text session tag keys can’t
+// exceed 128 characters and the values can’t exceed 256 characters. For these
+// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+// in the IAM User Guide.
+//
+// An AWS conversion compresses the passed session policies and session tags
+// into a packed binary format that has a separate limit. Your request can fail
+// for this limit even if your plain text meets the other requirements. The
+// PackedPolicySize response element indicates by percentage how close the policies
+// and tags for your request are to the upper size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to
+// the role. When you do, session tags override the role's tags with the same
+// key.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during
+// role chaining. For more information, see Chaining Roles with Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide.
+//
+// SAML Configuration
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider. You must
+// also create an IAM role that specifies this SAML provider in its trust policy.
//
// For more information, see the following resources:
//
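A sketch of calling AssumeRoleWithSAML once the SAML provider entity and role described above exist; the ARNs and the base64-encoded assertion are placeholders. Session tags for this operation travel inside the SAML assertion's attributes, so the request itself carries no Tags field:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
            PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"), // placeholder
            RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-example"),         // placeholder
            SAMLAssertion: aws.String("<base64-encoded SAML assertion>"),                     // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Subject), aws.Int64Value(out.PackedPolicySize))
    }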
@@ -332,9 +403,18 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the policy document was too large. The error
-// message describes how big the policy document is, in packed form, as a percentage
-// of what the API allows.
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
//
// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
// The identity provider (IdP) reported that authentication failed. This might
@@ -456,6 +536,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// key ID, a secret access key, and a security token. Applications can use these
// temporary security credentials to sign calls to AWS service API operations.
//
+// Session Duration
+//
// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
// last for one hour. However, you can use the optional DurationSeconds parameter
// to specify the duration of your session. You can provide a value from 900
@@ -469,6 +551,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
// in the IAM User Guide.
//
+// Permissions
+//
// The temporary security credentials created by AssumeRoleWithWebIdentity can
// be used to make API calls to any AWS service with the following exception:
// you cannot call the STS GetFederationToken or GetSessionToken API operations.
@@ -477,7 +561,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
// use as managed session policies. The plain text that you use for both inline
-// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// and managed session policies can't exceed 2,048 characters. Passing policies
// to this operation returns new temporary credentials. The resulting session's
// permissions are the intersection of the role's identity-based policy and
// the session policies. You can use the role's temporary credentials in subsequent
@@ -487,6 +571,42 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
+// Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see Passing Session Tags
+// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plain text session tag keys can’t
+// exceed 128 characters and the values can’t exceed 256 characters. For these
+// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+// in the IAM User Guide.
+//
+// An AWS conversion compresses the passed session policies and session tags
+// into a packed binary format that has a separate limit. Your request can fail
+// for this limit even if your plain text meets the other requirements. The
+// PackedPolicySize response element indicates by percentage how close the policies
+// and tags for your request are to the upper size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to
+// the role. When you do, the session tag overrides the role tag with the same
+// key.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during
+// role chaining. For more information, see Chaining Roles with Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide.
+//
+// Identities
+//
// Before your application can call AssumeRoleWithWebIdentity, you must have
// an identity token from a supported identity provider and create a role that
// the application can assume. The role that your application assumes must trust
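A sketch of AssumeRoleWithWebIdentity with placeholder values; as with SAML, session tags are carried inside the identity token issued by the IdP rather than as request fields:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
            RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-example"), // placeholder
            RoleSessionName:  aws.String("web-id-session"),
            WebIdentityToken: aws.String("<OIDC ID token from the identity provider>"), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.SubjectFromWebIdentityToken),
            aws.Int64Value(out.PackedPolicySize))
    }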
@@ -514,8 +634,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and
// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
// These toolkits contain sample apps that show how to invoke the identity
-// providers, and then how to use the information from these providers to
-// get and use temporary security credentials.
+// providers. The toolkits then show how to use the information from these
+// providers to get and use temporary security credentials.
//
// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
// This article discusses web identity federation and shows an example of
@@ -535,9 +655,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the policy document was too large. The error
-// message describes how big the policy document is, in packed form, as a percentage
-// of what the API allows.
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
//
// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
// The identity provider (IdP) reported that authentication failed. This might
@@ -547,11 +676,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// can also mean that the claim has expired or has been explicitly revoked.
//
// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
-// The request could not be fulfilled because the non-AWS identity provider
-// (IDP) that was asked to verify the incoming identity token could not be reached.
-// This is often a transient error caused by network conditions. Retry the request
+// The request could not be fulfilled because the identity provider (IDP) that
+// was asked to verify the incoming identity token could not be reached. This
+// is often a transient error caused by network conditions. Retry the request
// a limited number of times so that you don't exceed the request rate. If the
-// error persists, the non-AWS identity provider might be down or not responding.
+// error persists, the identity provider might be down or not responding.
//
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
// The web identity token that was passed could not be validated by AWS. Get
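The IDPCommunicationError description above asks callers to retry a limited number of times without exceeding the request rate. One hedged way to do that is a small bounded-retry wrapper; the helper name and retry counts here are illustrative, and fn would wrap a call such as the AssumeRoleWithWebIdentity request sketched earlier:

    package main

    import (
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // retryIDPCommunication retries fn a few times when STS reports
    // IDPCommunicationError, which is usually a transient network failure.
    func retryIDPCommunication(fn func() error) error {
        var err error
        for attempt := 1; attempt <= 3; attempt++ {
            if err = fn(); err == nil {
                return nil
            }
            aerr, ok := err.(awserr.Error)
            if !ok || aerr.Code() != sts.ErrCodeIDPCommunicationErrorException {
                return err
            }
            log.Printf("IdP unreachable (attempt %d): %v", attempt, err)
            time.Sleep(time.Duration(attempt) * time.Second)
        }
        return err
    }

    func main() {
        // Stand-in call; in practice fn would issue the STS request.
        _ = retryIDPCommunication(func() error { return nil })
    }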
@@ -763,7 +892,8 @@ func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *reques
// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
// to learn which IAM user owns the keys. To learn who requested the temporary
// credentials for an ASIA access key, view the STS events in your CloudTrail
-// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html).
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide.
//
// This operation does not indicate the state of the access key. The key might
// be active, inactive, or deleted. Active keys might not have permissions to
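A short sketch of GetAccessKeyInfo; the access key ID below is the standard documentation placeholder:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
            AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // placeholder key ID
        })
        if err != nil {
            log.Fatal(err)
        }
        // The response identifies only the owning account; as noted above, it
        // says nothing about the key's state or permissions.
        fmt.Println(aws.StringValue(out.Account))
    }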
@@ -850,7 +980,8 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
// sts:GetCallerIdentity action, you can still perform this operation. Permissions
// are not required because the same information is returned when an IAM user
// or role is denied access. To view an example response, see I Am Not Authorized
-// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa).
+// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
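Because GetCallerIdentity needs no extra permissions, it is a convenient sanity check for whichever credentials the session resolved:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn),
            aws.StringValue(out.UserId))
    }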
@@ -942,7 +1073,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// or an OpenID Connect-compatible identity provider. In this case, we recommend
// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
// For more information, see Federation Through a Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide.
//
// You can also call GetFederationToken using the security credentials of an
// AWS account root user, but we do not recommend it. Instead, we recommend
@@ -952,41 +1084,67 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
// in the IAM User Guide.
//
+// Session duration
+//
// The temporary credentials are valid for the specified duration, from 900
// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
-// is 43,200 seconds (12 hours). Temporary credentials that are obtained by
-// using AWS account root user credentials have a maximum duration of 3,600
-// seconds (1 hour).
+// session duration is 43,200 seconds (12 hours). Temporary credentials that
+// are obtained by using AWS account root user credentials have a maximum duration
+// of 3,600 seconds (1 hour).
//
-// The temporary security credentials created by GetFederationToken can be used
-// to make API calls to any AWS service with the following exceptions:
+// Permissions
//
-// * You cannot use these credentials to call any IAM API operations.
+// You can use the temporary credentials created by GetFederationToken in any
+// AWS service except the following:
//
-// * You cannot call any STS API operations except GetCallerIdentity.
+// * You cannot call any IAM operations using the AWS CLI or the AWS API.
//
-// Permissions
+// * You cannot call any STS operations except GetCallerIdentity.
//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
// use as managed session policies. The plain text that you use for both inline
-// and managed session policies shouldn't exceed 2048 characters.
+// and managed session policies can't exceed 2,048 characters.
//
// Though the session policy parameters are optional, if you do not pass a policy,
-// then the resulting federated user session has no permissions. The only exception
-// is when the credentials are used to access a resource that has a resource-based
-// policy that specifically references the federated user session in the Principal
-// element of the policy. When you pass session policies, the session permissions
-// are the intersection of the IAM user policies and the session policies that
-// you pass. This gives you a way to further restrict the permissions for a
-// federated user. You cannot use session policies to grant more permissions
-// than those that are defined in the permissions policy of the IAM user. For
-// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// then the resulting federated user session has no permissions. When you pass
+// session policies, the session permissions are the intersection of the IAM
+// user policies and the session policies that you pass. This gives you a way
+// to further restrict the permissions for a federated user. You cannot use
+// session policies to grant more permissions than those that are defined in
+// the permissions policy of the IAM user. For more information, see Session
+// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide. For information about using GetFederationToken to
// create temporary security credentials, see GetFederationToken—Federation
// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session
+// in the Principal element of the policy, the session has the permissions allowed
+// by the policy. These permissions are granted in addition to the permissions
+// granted by the session policies.
+//
+// Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see Passing Session
+// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This
+// means that you cannot have separate Department and department tag keys. Assume
+// that the user that you are federating has the Department=Marketing tag and
+// you pass the department=engineering session tag. Department and department
+// are not saved as separate tags, and the session tag passed in the request
+// takes precedence over the user tag.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
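A sketch of GetFederationToken that passes an inline session policy and a session tag; the federated user name, policy, and tag values are placeholders. PackedPolicySize in the response reports how close the packed policies and tags are to the limit described above:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListAllMyBuckets","Resource":"*"}]}`

        out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
            Name:            aws.String("example-federated-user"), // placeholder
            DurationSeconds: aws.Int64(3600),
            Policy:          aws.String(policy),
            Tags: []*sts.Tag{
                {Key: aws.String("Department"), Value: aws.String("Engineering")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.FederatedUser.Arn),
            aws.Int64Value(out.PackedPolicySize))
    }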
@@ -1000,9 +1158,18 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the policy document was too large. The error
-// message describes how big the policy document is, in packed form, as a percentage
-// of what the API allows.
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
@@ -1091,6 +1258,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
+// Session Duration
+//
// The GetSessionToken operation must be called by using the long-term AWS security
// credentials of the AWS account root user or an IAM user. Credentials that
// are created by IAM users are valid for the duration that you specify. This
@@ -1099,6 +1268,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// based on account credentials can range from 900 seconds (15 minutes) up to
// 3,600 seconds (1 hour), with a default of 1 hour.
//
+// Permissions
+//
// The temporary security credentials created by GetSessionToken can be used
// to make API calls to any AWS service with the following exceptions:
//
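A sketch of GetSessionToken for an IAM user with an MFA device; the serial number and token code are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
            DurationSeconds: aws.Int64(3600),
            SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
            TokenCode:       aws.String("123456"),                                     // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId),
            aws.TimeValue(out.Credentials.Expiration))
    }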
@@ -1213,16 +1384,16 @@ type AssumeRoleInput struct {
// in the IAM User Guide.
//
// The plain text that you use for both inline and managed session policies
- // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
// character from the space character to the end of the valid character list
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1231,15 +1402,15 @@ type AssumeRoleInput struct {
//
// This parameter is optional. You can provide up to 10 managed policy ARNs.
// However, the plain text that you use for both inline and managed session
- // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // policies can't exceed 2,048 characters. For more information about ARNs,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the AWS General Reference.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
@@ -1284,6 +1455,41 @@ type AssumeRoleInput struct {
// also include underscores or any of the following characters: =,.@-
SerialNumber *string `min:"9" type:"string"`
+ // A list of session tags that you want to pass. Each session tag consists of
+ // a key name and an associated value. For more information about session tags,
+ // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+ // in the IAM User Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plain
+ // text session tag keys can’t exceed 128 characters, and the values can’t
+ // exceed 256 characters. For these and additional limits, see IAM and STS Character
+ // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
+ //
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the role. When you do, session tags override a role tag with the same
+ // key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This
+ // means that you cannot have separate Department and department tag keys. Assume
+ // that the role has the Department=Marketing tag and you pass the department=engineering
+ // session tag. Department and department are not saved as separate tags, and
+ // the session tag passed in the request takes precedence over the role tag.
+ //
+ // Additionally, if you used temporary credentials to perform this operation,
+ // the new session inherits any transitive session tags from the calling session.
+ // If you pass a session tag with the same key as an inherited tag, the operation
+ // fails. To view the inherited tags for a session, see the AWS CloudTrail logs.
+ // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs)
+ // in the IAM User Guide.
+ Tags []*Tag `type:"list"`
+
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
@@ -1292,6 +1498,19 @@ type AssumeRoleInput struct {
// The format for this parameter, as described by its regex pattern, is a sequence
// of six numeric digits.
TokenCode *string `min:"6" type:"string"`
+
+ // A list of keys for session tags that you want to set as transitive. If you
+ // set a tag key as transitive, the corresponding key and value passes to subsequent
+ // sessions in a role chain. For more information, see Chaining Roles with Session
+ // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+ // in the IAM User Guide.
+ //
+ // This parameter is optional. When you set session tags as transitive, the
+ // session policy and session tags packed binary limit is not affected.
+ //
+ // If you choose not to specify a transitive tag key, then no tags are passed
+ // from this session to any subsequent sessions.
+ TransitiveTagKeys []*string `type:"list"`
}
// String returns the string representation
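The same tagged request can be built with the generated setters, and Validate catches the structural constraints (a missing role ARN, a tag with an empty key, and so on) before the request is sent. The ARN and tag values are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        input := (&sts.AssumeRoleInput{}).
            SetRoleArn("arn:aws:iam::123456789012:role/example"). // placeholder
            SetRoleSessionName("tagged-session").
            SetTags([]*sts.Tag{
                (&sts.Tag{}).SetKey("Project").SetValue("demo"),
            }).
            SetTransitiveTagKeys([]*string{aws.String("Project")})

        if err := input.Validate(); err != nil {
            fmt.Println("invalid input:", err)
            return
        }
        fmt.Println("input is valid")
    }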
@@ -1344,6 +1563,16 @@ func (s *AssumeRoleInput) Validate() error {
}
}
}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -1393,12 +1622,24 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
return s
}
+// SetTags sets the Tags field's value.
+func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput {
+ s.Tags = v
+ return s
+}
+
// SetTokenCode sets the TokenCode field's value.
func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
s.TokenCode = &v
return s
}
+// SetTransitiveTagKeys sets the TransitiveTagKeys field's value.
+func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput {
+ s.TransitiveTagKeys = v
+ return s
+}
+
// Contains the response to a successful AssumeRole request, including temporary
// AWS credentials that can be used to make AWS requests.
type AssumeRoleOutput struct {
@@ -1418,9 +1659,10 @@ type AssumeRoleOutput struct {
// We strongly recommend that you make no assumptions about the maximum size.
Credentials *Credentials `type:"structure"`
- // A percentage value that indicates the size of the policy in packed form.
- // The service rejects any policy with a packed size greater than 100 percent,
- // which means the policy exceeded the allowed space.
+ // A percentage value that indicates the packed size of the session policies
+ // and session tags combined passed in the request. The request fails if the
+ // packed size is greater than 100 percent, which means the policies and tags
+ // exceeded the allowed space.
PackedPolicySize *int64 `type:"integer"`
}
@@ -1491,16 +1733,16 @@ type AssumeRoleWithSAMLInput struct {
// in the IAM User Guide.
//
// The plain text that you use for both inline and managed session policies
- // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
// character from the space character to the end of the valid character list
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1509,15 +1751,15 @@ type AssumeRoleWithSAMLInput struct {
//
// This parameter is optional. You can provide up to 10 managed policy ARNs.
// However, the plain text that you use for both inline and managed session
- // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // policies can't exceed 2,048 characters. For more information about ARNs,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the AWS General Reference.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
@@ -1546,7 +1788,7 @@ type AssumeRoleWithSAMLInput struct {
// in the IAM User Guide.
//
// SAMLAssertion is a required field
- SAMLAssertion *string `min:"4" type:"string" required:"true"`
+ SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation
@@ -1673,9 +1915,10 @@ type AssumeRoleWithSAMLOutput struct {
// ) )
NameQualifier *string `type:"string"`
- // A percentage value that indicates the size of the policy in packed form.
- // The service rejects any policy with a packed size greater than 100 percent,
- // which means the policy exceeded the allowed space.
+ // A percentage value that indicates the packed size of the session policies
+ // and session tags combined passed in the request. The request fails if the
+ // packed size is greater than 100 percent, which means the policies and tags
+ // exceeded the allowed space.
PackedPolicySize *int64 `type:"integer"`
// The value of the NameID element in the Subject element of the SAML assertion.
@@ -1786,16 +2029,16 @@ type AssumeRoleWithWebIdentityInput struct {
// in the IAM User Guide.
//
// The plain text that you use for both inline and managed session policies
- // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
// character from the space character to the end of the valid character list
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1804,15 +2047,15 @@ type AssumeRoleWithWebIdentityInput struct {
//
// This parameter is optional. You can provide up to 10 managed policy ARNs.
// However, the plain text that you use for both inline and managed session
- // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // policies can't exceed 2,048 characters. For more information about ARNs,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the AWS General Reference.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
@@ -1857,7 +2100,7 @@ type AssumeRoleWithWebIdentityInput struct {
// the application makes an AssumeRoleWithWebIdentity call.
//
// WebIdentityToken is a required field
- WebIdentityToken *string `min:"4" type:"string" required:"true"`
+ WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation
@@ -1983,9 +2226,10 @@ type AssumeRoleWithWebIdentityOutput struct {
// We strongly recommend that you make no assumptions about the maximum size.
Credentials *Credentials `type:"structure"`
- // A percentage value that indicates the size of the policy in packed form.
- // The service rejects any policy with a packed size greater than 100 percent,
- // which means the policy exceeded the allowed space.
+ // A percentage value that indicates the packed size of the session policies
+ // and session tags combined passed in the request. The request fails if the
+ // packed size is greater than 100 percent, which means the policies and tags
+ // exceeded the allowed space.
PackedPolicySize *int64 `type:"integer"`
// The issuing authority of the web identity token presented. For OpenID Connect
@@ -2057,7 +2301,7 @@ type AssumedRoleUser struct {
// The ARN of the temporary security credentials that are returned from the
// AssumeRole action. For more information about ARNs and how to use them in
// policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
- // in Using IAM.
+ // in the IAM User Guide.
//
// Arn is a required field
Arn *string `min:"20" type:"string" required:"true"`
@@ -2225,7 +2469,7 @@ type FederatedUser struct {
// The ARN that specifies the federated user that is associated with the credentials.
// For more information about ARNs and how to use them in policies, see IAM
// Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
- // in Using IAM.
+ // in the IAM User Guide.
//
// Arn is a required field
Arn *string `min:"20" type:"string" required:"true"`
@@ -2265,7 +2509,7 @@ type GetAccessKeyInfoInput struct {
// The identifier of an access key.
//
// This parameter allows (through its regex pattern) a string of characters
- // that can consist of any upper- or lowercased letter or digit.
+ // that can consist of any upper- or lowercase letter or digit.
//
// AccessKeyId is a required field
AccessKeyId *string `min:"16" type:"string" required:"true"`
@@ -2418,10 +2662,7 @@ type GetFederationTokenInput struct {
// use as managed session policies.
//
// This parameter is optional. However, if you do not pass any session policies,
- // then the resulting federated user session has no permissions. The only exception
- // is when the credentials are used to access a resource that has a resource-based
- // policy that specifically references the federated user session in the Principal
- // element of the policy.
+ // then the resulting federated user session has no permissions.
//
// When you pass session policies, the session permissions are the intersection
// of the IAM user policies and the session policies that you pass. This gives
@@ -2431,17 +2672,23 @@ type GetFederationTokenInput struct {
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
+ // The resulting credentials can be used to access a resource that has a resource-based
+ // policy. If that policy specifically references the federated user session
+ // in the Principal element of the policy, the session has the permissions allowed
+ // by the policy. These permissions are granted in addition to the permissions
+ // that are granted by the session policies.
+ //
// The plain text that you use for both inline and managed session policies
- // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // can't exceed 2,048 characters. The JSON policy characters can be any ASCII
// character from the space character to the end of the valid character list
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -2452,16 +2699,13 @@ type GetFederationTokenInput struct {
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policies to
// use as managed session policies. The plain text that you use for both inline
- // and managed session policies shouldn't exceed 2048 characters. You can provide
+ // and managed session policies can't exceed 2,048 characters. You can provide
// up to 10 managed policy ARNs. For more information about ARNs, see Amazon
// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the AWS General Reference.
//
// This parameter is optional. However, if you do not pass any session policies,
- // then the resulting federated user session has no permissions. The only exception
- // is when the credentials are used to access a resource that has a resource-based
- // policy that specifically references the federated user session in the Principal
- // element of the policy.
+ // then the resulting federated user session has no permissions.
//
// When you pass session policies, the session permissions are the intersection
// of the IAM user policies and the session policies that you pass. This gives
@@ -2471,12 +2715,46 @@ type GetFederationTokenInput struct {
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
- // The characters in this parameter count towards the 2048 character session
- // policy guideline. However, an AWS conversion compresses the session policies
- // into a packed binary format that has a separate limit. This is the enforced
- // limit. The PackedPolicySize response element indicates by percentage how
- // close the policy is to the upper size limit.
+ // The resulting credentials can be used to access a resource that has a resource-based
+ // policy. If that policy specifically references the federated user session
+ // in the Principal element of the policy, the session has the permissions allowed
+ // by the policy. These permissions are granted in addition to the permissions
+ // that are granted by the session policies.
+ //
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // A list of session tags. Each session tag consists of a key name and an associated
+ // value. For more information about session tags, see Passing Session Tags
+ // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+ // in the IAM User Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plain
+ // text session tag keys can’t exceed 128 characters and the values can’t
+ // exceed 256 characters. For these and additional limits, see IAM and STS Character
+ // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
+ //
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the user you are federating. When you do, session tags override a user
+ // tag with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This
+ // means that you cannot have separate Department and department tag keys. Assume
+ // that the user has the Department=Marketing tag and you pass the department=engineering
+ // session tag. Department and department are not saved as separate tags, and
+ // the session tag passed in the request takes precedence over the user tag.
+ Tags []*Tag `type:"list"`
}
// String returns the string representation
@@ -2514,6 +2792,16 @@ func (s *GetFederationTokenInput) Validate() error {
}
}
}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -2545,6 +2833,12 @@ func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetF
return s
}
+// SetTags sets the Tags field's value.
+func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput {
+ s.Tags = v
+ return s
+}
+
// Contains the response to a successful GetFederationToken request, including
// temporary AWS credentials that can be used to make AWS requests.
type GetFederationTokenOutput struct {
@@ -2563,9 +2857,10 @@ type GetFederationTokenOutput struct {
// an Amazon S3 bucket policy.
FederatedUser *FederatedUser `type:"structure"`
- // A percentage value indicating the size of the policy in packed form. The
- // service rejects policies for which the packed size is greater than 100 percent
- // of the allowed value.
+ // A percentage value that indicates the packed size of the session policies
+ // and session tags combined passed in the request. The request fails if the
+ // packed size is greater than 100 percent, which means the policies and tags
+ // exceeded the allowed space.
PackedPolicySize *int64 `type:"integer"`
}
@@ -2748,3 +3043,73 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
s.Arn = &v
return s
}
+
+// You can pass custom key-value pair attributes when you assume a role or federate
+// a user. These are called session tags. You can then use the session tags
+// to control access to resources. For more information, see Tagging AWS STS
+// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // The key for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag keys can’t
+ // exceed 128 characters. For these and additional limits, see IAM and STS Character
+ // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // The value for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag values can’t
+ // exceed 256 characters. For these and additional limits, see IAM and STS Character
+ // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
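The new Tag type enforces its constraints through Validate: Key is required with a minimum length of 1, and Value is required (though it may be an empty string). A small sketch of what passing and failing validation look like; the field values are illustrative:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        good := &sts.Tag{Key: aws.String("Department"), Value: aws.String("Engineering")}
        fmt.Println(good.Validate()) // <nil>

        // An empty Key and a missing Value both contribute to ErrInvalidParams.
        bad := &sts.Tag{Key: aws.String("")}
        fmt.Println(bad.Validate())
    }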
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
index 41ea09c3..a233f542 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -14,11 +14,11 @@ const (
// ErrCodeIDPCommunicationErrorException for service response error code
// "IDPCommunicationError".
//
- // The request could not be fulfilled because the non-AWS identity provider
- // (IDP) that was asked to verify the incoming identity token could not be reached.
- // This is often a transient error caused by network conditions. Retry the request
+ // The request could not be fulfilled because the identity provider (IDP) that
+ // was asked to verify the incoming identity token could not be reached. This
+ // is often a transient error caused by network conditions. Retry the request
// a limited number of times so that you don't exceed the request rate. If the
- // error persists, the non-AWS identity provider might be down or not responding.
+ // error persists, the identity provider might be down or not responding.
ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
// ErrCodeIDPRejectedClaimException for service response error code
@@ -56,9 +56,18 @@ const (
// ErrCodePackedPolicyTooLargeException for service response error code
// "PackedPolicyTooLarge".
//
- // The request was rejected because the policy document was too large. The error
- // message describes how big the policy document is, in packed form, as a percentage
- // of what the API allows.
+ // The request was rejected because the total packed size of the session policies
+ // and session tags combined was too large. An AWS conversion compresses the
+ // session policy document, session policy ARNs, and session tags into a packed
+ // binary format that has a separate limit. The error message indicates by percentage
+ // how close the policies and tags are to the upper size limit. For more information,
+ // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+ // in the IAM User Guide.
+ //
+ // You could receive this error even though you meet other defined session policy
+ // and session tag limits. For more information, see IAM and STS Entity Character
+ // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+ // in the IAM User Guide.
ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
// ErrCodeRegionDisabledException for service response error code
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
index 185c914d..d34a6855 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -31,7 +31,7 @@ var initRequest func(*request.Request)
const (
ServiceName = "sts" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
- ServiceID = "STS" // ServiceID is a unique identifer of a specific service.
+ ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the STS client with a session.
@@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
+// mySession := session.Must(session.NewSession())
+//
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
@@ -46,11 +48,11 @@ const (
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(EndpointsID, cfgs...)
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
svc := &STS{
Client: client.New(
cfg,
@@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
+ PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2011-06-15",
},
diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml
index 1f980775..730c7fa5 100644
--- a/vendor/github.com/jmespath/go-jmespath/.travis.yml
+++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -3,7 +3,15 @@ language: go
sudo: false
go:
- - 1.4
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
install: go get -v -t ./...
script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
index 187ef676..110ad799 100644
--- a/vendor/github.com/jmespath/go-jmespath/README.md
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -4,4 +4,84 @@
-See http://jmespath.org for more info.
+go-jmespath is a Go implementation of JMESPath,
+which is a query language for JSON. It will take a JSON
+document and transform it into another JSON document
+through a JMESPath expression.
+
+Using go-jmespath is really easy. There's a single function
+you use, `jmespath.Search`:
+
+
+```go
+> import "github.com/jmespath/go-jmespath"
+>
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar.baz[2]", data)
+result = 2
+```
+
+In the example we gave the ``search`` function input data of
+`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath
+expression `foo.bar.baz[2]`, and the `search` function evaluated
+the expression against the input data to produce the result ``2``.
+
+The JMESPath language can do a lot more than select an element
+from a list. Here are a few more examples:
+
+```go
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.search("foo.bar", data)
+result = { "baz": [ 0, 1, 2, 3, 4 ] }
+
+
+> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"},
+ {"first": "c", "last": "d"}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[*].first", data)
+result = [ 'a', 'c' ]
+
+
+> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25},
+ {"age": 30}, {"age": 35},
+ {"age": 40}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.search("foo[?age > `30`]")
+result = [ { age: 35 }, { age: 40 } ]
+```
+
+You can also pre-compile your query. This is useful if
+you are going to run multiple searches with it:
+
+```go
+ > var jsondata = []byte(`{"foo": "bar"}`)
+ > var data interface{}
+ > err := json.Unmarshal(jsondata, &data)
+ > precompiled, err := jmespath.Compile("foo")
+ > if err != nil {
+ > // ... handle the error
+ > }
+ > result, err := precompiled.Search(data)
+ result = "bar"
+```
+
+## More Resources
+
+The examples above only show a small amount of what
+a JMESPath expression can do. If you want to take a
+tour of the language, the *best* place to go is the
+[JMESPath Tutorial](http://jmespath.org/tutorial.html).
+
+One of the best things about JMESPath is that it is
+implemented in many different programming languages including
+python, ruby, php, lua, etc. To see a complete list of libraries,
+check out the [JMESPath libraries page](http://jmespath.org/libraries.html).
+
+And finally, the full JMESPath specification can be found
+on the [JMESPath site](http://jmespath.org/specification.html).
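
The README snippets above are REPL-style and leave out imports and error handling; a complete, compilable version of the basic and pre-compiled searches (assuming only the standard library's encoding/json alongside this package) might look like:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/jmespath/go-jmespath"
)

func main() {
	jsondata := []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`)

	var data interface{}
	if err := json.Unmarshal(jsondata, &data); err != nil {
		log.Fatal(err)
	}

	// One-off search.
	result, err := jmespath.Search("foo.bar.baz[2]", data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result) // 2

	// Pre-compiled query, reusable across multiple searches.
	expr, err := jmespath.Compile("foo.bar")
	if err != nil {
		log.Fatal(err)
	}
	sub, err := expr.Search(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sub) // map[baz:[0 1 2 3 4]]
}
```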
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
index 8e26ffee..010efe9b 100644
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -2,7 +2,7 @@ package jmespath
import "strconv"
-// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
// safe for concurrent use by multiple goroutines.
type JMESPath struct {
ast ASTNode
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
index 1240a175..4abc303a 100644
--- a/vendor/github.com/jmespath/go-jmespath/parser.go
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -137,7 +137,7 @@ func (p *Parser) Parse(expression string) (ASTNode, error) {
}
if p.current() != tEOF {
return ASTNode{}, p.syntaxError(fmt.Sprintf(
- "Unexpected token at the end of the expresssion: %s", p.current()))
+ "Unexpected token at the end of the expression: %s", p.current()))
}
return parsed, nil
}
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
index 588ceca1..9159de03 100644
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -1,11 +1,10 @@
language: go
go_import_path: github.com/pkg/errors
go:
- - 1.4.3
- - 1.5.4
- - 1.6.2
- - 1.7.1
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
- tip
script:
- - go test -v ./...
+ - make check
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 00000000..ce9d7cde
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
index 273db3c9..54dfdcb1 100644
--- a/vendor/github.com/pkg/errors/README.md
+++ b/vendor/github.com/pkg/errors/README.md
@@ -1,4 +1,4 @@
-# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
Package errors provides simple error handling primitives.
@@ -41,12 +41,19 @@ default:
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
## Contributing
-We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
-Before proposing a change, please discuss your change by raising an issue.
+Before sending a PR, please discuss your change by raising an issue.
-## Licence
+## License
BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
index 842ee804..161aea25 100644
--- a/vendor/github.com/pkg/errors/errors.go
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -6,7 +6,7 @@
// return err
// }
//
-// which applied recursively up the call stack results in error reports
+// which when applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
@@ -15,16 +15,17 @@
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
-// and the supplied message. For example
+// together with the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
-// If additional control is required the errors.WithStack and errors.WithMessage
-// functions destructure errors.Wrap into its component operations of annotating
-// an error with a stack trace and an a message, respectively.
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
//
// Retrieving the cause of an error
//
@@ -38,7 +39,7 @@
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
-// the topmost error which does not implement causer, which is assumed to be
+// the topmost error that does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
@@ -48,16 +49,16 @@
// // unknown error
// }
//
-// causer interface is not exported by this package, but is considered a part
-// of stable public API.
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
-// be formatted by the fmt package. The following verbs are supported
+// be formatted by the fmt package. The following verbs are supported:
//
// %s print the error. If the error has a Cause it will be
-// printed recursively
+// printed recursively.
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
@@ -65,13 +66,13 @@
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
-// invoked. This information can be retrieved with the following interface.
+// invoked. This information can be retrieved with the following interface:
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
-// Where errors.StackTrace is defined as
+// The returned errors.StackTrace type is defined as
//
// type StackTrace []Frame
//
@@ -81,12 +82,12 @@
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
-// fmt.Printf("%+s:%d", f)
+// fmt.Printf("%+s:%d\n", f, f)
// }
// }
//
-// stackTracer interface is not exported by this package, but is considered a part
-// of stable public API.
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
//
// See the documentation for Frame.Format for more details.
package errors
@@ -158,6 +159,9 @@ type withStack struct {
func (w *withStack) Cause() error { return w.error }
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
@@ -192,7 +196,7 @@ func Wrap(err error, message string) error {
}
// Wrapf returns an error annotating err with a stack trace
-// at the point Wrapf is call, and the format specifier.
+// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
@@ -220,6 +224,18 @@ func WithMessage(err error, message string) error {
}
}
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
type withMessage struct {
cause error
msg string
@@ -228,6 +244,9 @@ type withMessage struct {
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 00000000..be0d10d0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
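
Combined with the Unwrap methods added to withStack and withMessage above, these forwarding helpers let errors wrapped by this package participate in Go 1.13 error chains. A small sketch, assuming Go 1.13+ and a file path that does not exist:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	// Wrap a standard library error with context and a stack trace.
	_, err := os.Open("/no/such/file")
	wrapped := errors.Wrap(err, "loading config")

	// errors.Is walks the chain through the new Unwrap methods.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	// errors.As extracts a concrete error type from the chain.
	var pathErr *os.PathError
	if errors.As(wrapped, &pathErr) {
		fmt.Println("failed path:", pathErr.Path)
	}
}
```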
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
index 6b1f2891..779a8348 100644
--- a/vendor/github.com/pkg/errors/stack.go
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -5,10 +5,13 @@ import (
"io"
"path"
"runtime"
+ "strconv"
"strings"
)
// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
type Frame uintptr
// pc returns the program counter for this frame;
@@ -37,6 +40,15 @@ func (f Frame) line() int {
return line
}
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
@@ -46,29 +58,24 @@ func (f Frame) line() int {
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
-// %+s path of source file relative to the compile time GOPATH
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (<funcname>\n\t<path>)
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
- pc := f.pc()
- fn := runtime.FuncForPC(pc)
- if fn == nil {
- io.WriteString(s, "unknown")
- } else {
- file, _ := fn.FileLine(pc)
- fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
- }
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
- fmt.Fprintf(s, "%d", f.line())
+ io.WriteString(s, strconv.Itoa(f.line()))
case 'n':
- name := runtime.FuncForPC(f.pc()).Name()
- io.WriteString(s, funcname(name))
+ io.WriteString(s, funcname(f.name()))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
@@ -76,25 +83,57 @@ func (f Frame) Format(s fmt.State, verb rune) {
}
}
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
- fmt.Fprintf(s, "\n%+v", f)
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
- fmt.Fprintf(s, "%v", []Frame(st))
+ st.formatSlice(s, verb)
}
case 's':
- fmt.Fprintf(s, "%s", []Frame(st))
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
}
+ io.WriteString(s, "]")
}
// stack represents a stack of program counters.
@@ -136,43 +175,3 @@ func funcname(name string) string {
i = strings.Index(name, ".")
return name[i+1:]
}
-
-func trimGOPATH(name, file string) string {
- // Here we want to get the source file path relative to the compile time
- // GOPATH. As of Go 1.6.x there is no direct way to know the compiled
- // GOPATH at runtime, but we can infer the number of path segments in the
- // GOPATH. We note that fn.Name() returns the function name qualified by
- // the import path, which does not include the GOPATH. Thus we can trim
- // segments from the beginning of the file path until the number of path
- // separators remaining is one more than the number of path separators in
- // the function name. For example, given:
- //
- // GOPATH /home/user
- // file /home/user/src/pkg/sub/file.go
- // fn.Name() pkg/sub.Type.Method
- //
- // We want to produce:
- //
- // pkg/sub/file.go
- //
- // From this we can easily see that fn.Name() has one less path separator
- // than our desired output. We count separators from the end of the file
- // path until it finds two more than in the function name and then move
- // one character forward to preserve the initial path segment without a
- // leading separator.
- const sep = "/"
- goal := strings.Count(name, sep) + 2
- i := len(file)
- for n := 0; n < goal; n++ {
- i = strings.LastIndex(file[:i], sep)
- if i == -1 {
- // not enough separators found, set i so that the slice expression
- // below leaves file unmodified
- i = -len(sep)
- break
- }
- }
- // get back to 0 or trim the leading separator
- file = file[i+len(sep):]
- return file
-}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index a6140099..2a5399ec 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -17,6 +17,7 @@ type pipe struct {
mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu
b pipeBuffer // nil when done reading
+ unread int // bytes unread when done
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
@@ -33,7 +34,7 @@ func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.b == nil {
- return 0
+ return p.unread
}
return p.b.Len()
}
@@ -80,6 +81,7 @@ func (p *pipe) Write(d []byte) (n int, err error) {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
+ p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
@@ -117,6 +119,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
}
p.readFn = fn
if dst == &p.breakErr {
+ if p.b != nil {
+ p.unread += p.b.Len()
+ }
p.b = nil
}
*dst = err
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index d2ba820c..de31d72b 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -252,7 +252,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
}
}
if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).")
}
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index c51a73c0..d948e96e 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -93,7 +93,7 @@ type Transport struct {
// send in the initial settings frame. It is how many bytes
// of response headers are allowed. Unlike the http2 spec, zero here
// means to use a default limit (currently 10MB). If you actually
- // want to advertise an ulimited value to the peer, Transport
+ // want to advertise an unlimited value to the peer, Transport
// interprets the highest possible value here (0xffffffff or 1<<32-1)
// to mean no limit.
MaxHeaderListSize uint32
@@ -227,6 +227,7 @@ type ClientConn struct {
br *bufio.Reader
fr *Framer
lastActive time.Time
+ lastIdle time.Time // time last idle
// Settings from peer: (also guarded by mu)
maxFrameSize uint32
maxConcurrentStreams uint32
@@ -603,7 +604,7 @@ func (t *Transport) expectContinueTimeout() time.Duration {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, false)
+ return t.newClientConn(c, t.disableKeepAlives())
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
@@ -736,7 +737,8 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
- int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
@@ -746,6 +748,16 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
return st.canTakeNewRequest
}
+// tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *ClientConn) tooIdleLocked() bool {
+ // The Round(0) strips the monotonic clock reading so the
+ // times are compared based on their wall time. We don't want
+ // to reuse a connection that's been sitting idle during
+ // VM/laptop suspend if monotonic time was also frozen.
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
@@ -1150,6 +1162,7 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
}
return errClientConnUnusable
}
+ cc.lastIdle = time.Time{}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
if waitingForConn != nil {
close(waitingForConn)
@@ -1478,7 +1491,29 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if vv[0] == "" {
continue
}
-
+ } else if strings.EqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
}
for _, v := range vv {
@@ -1616,6 +1651,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
}
close(cs.done)
// Wake up checkResetOrDone via clientStream.awaitFlowControl and
diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go
index c515d7ad..8ce0811f 100644
--- a/vendor/golang.org/x/net/idna/tables11.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-// +build go1.13
+// +build go1.13,!go1.14
package idna
diff --git a/vendor/golang.org/x/net/idna/tables12.00.go b/vendor/golang.org/x/net/idna/tables12.00.go
new file mode 100644
index 00000000..f4b8ea36
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables12.00.go
@@ -0,0 +1,4733 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.14
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "12.0.0"
+
+var mappings string = "" + // Size: 8178 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多" +
+ "\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販" +
+ "\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打" +
+ "\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕" +
+ "\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你" +
+ "\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內" +
+ "\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉" +
+ "\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟" +
+ "\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙" +
+ "\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型" +
+ "\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮" +
+ "\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍" +
+ "\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰" +
+ "\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹" +
+ "\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞" +
+ "\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢" +
+ "\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙" +
+ "\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓" +
+ "\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛" +
+ "\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派" +
+ "\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆" +
+ "\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀" +
+ "\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾" +
+ "\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌" +
+ "\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒" +
+ "\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺" +
+ "\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋" +
+ "\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著" +
+ "\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜" +
+ "\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠" +
+ "\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁" +
+ "\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘" +
+ "\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲" +
+ "\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭" +
+ "\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4862 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" +
+ "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" +
+ "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" +
+ "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" +
+ "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" +
+ "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" +
+ "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" +
+ "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" +
+ "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" +
+ "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" +
+ "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" +
+ "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" +
+ "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" +
+ "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" +
+ "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" +
+ "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" +
+ "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" +
+ "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" +
+ "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" +
+ "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" +
+ "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" +
+ "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" +
+ "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" +
+ "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" +
+ "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" +
+ "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" +
+ "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" +
+ "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" +
+ "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" +
+ "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" +
+ "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" +
+ "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" +
+ "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" +
+ "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" +
+ "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" +
+ "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" +
+ "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" +
+ "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" +
+ "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" +
+ "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" +
+ "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" +
+ "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" +
+ "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" +
+ "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" +
+ "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" +
+ "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" +
+ "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" +
+ "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" +
+ "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." +
+ "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" +
+ "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" +
+ "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" +
+ "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" +
+ "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" +
+ "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" +
+ "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" +
+ "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03" +
+ "\x09\x0c\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06" +
+ "!3\x03\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05" +
+ "\x03\x07<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" +
+ "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" +
+ "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" +
+ "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" +
+ "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" +
+ "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" +
+ ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" +
+ "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" +
+ "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" +
+ "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" +
+ "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" +
+ "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" +
+ "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" +
+ "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" +
+ "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" +
+ "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" +
+ "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" +
+ "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" +
+ ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29708 bytes (29.01 KiB). Checksum: c3ecc76d8fffa6e6.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866,
+ 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e,
+ 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe,
+ 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e,
+ 0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e,
+ 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde,
+ 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e,
+ 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5,
+ 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875,
+ 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935,
+ 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5,
+ 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15,
+ 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75,
+ 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95,
+ 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75,
+ 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5,
+ 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55,
+ 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15,
+ 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95,
+ 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5,
+ 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5,
+ 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5,
+ 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5,
+ 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275,
+ 0xf1e: 0x3295, 0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5,
+ 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5,
+ 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475,
+ 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535,
+ 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5,
+ 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795,
+ 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855,
+ 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915,
+ 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5,
+ 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95,
+ 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55,
+ 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5,
+ 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95,
+ 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d,
+ 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d,
+ 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05,
+ 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95,
+ 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd,
+ 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55,
+ 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5,
+ 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015,
+ 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x4045, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x4065, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x40a5, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40c5, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x4105,
+ 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x41a5, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6419, 0x10a2: 0x4205, 0x10a3: 0x4225,
+ 0x10a4: 0x4245, 0x10a5: 0x6431, 0x10a6: 0x4265, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x4305, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x4325, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x4345, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x43a5, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x4425,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008,
+ 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad,
+ 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d,
+ 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008,
+ 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d,
+ 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d,
+ 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d,
+ 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d,
+ 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed,
+ 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d,
+ 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d,
+ 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d8d,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7dad,
+ 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d,
+ 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e,
+ 0x158c: 0x7fae, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7fcd,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7ecd,
+ 0x159e: 0x7f2d, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x800e, 0x15b1: 0xb009, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040,
+ 0x15b6: 0x806e, 0x15b7: 0xb031, 0x15b8: 0x808e, 0x15b9: 0xb059, 0x15ba: 0x80ae, 0x15bb: 0xb081,
+ 0x15bc: 0x80ce, 0x15bd: 0xb0a9, 0x15be: 0x80ee, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d,
+ 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d,
+ 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd,
+ 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d,
+ 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d,
+ 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d,
+ 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd,
+ 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d,
+ 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d,
+ 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d,
+ 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d,
+ 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed,
+ 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d,
+ 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed,
+ 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d,
+ 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d,
+ 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d,
+ 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x8a0e,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d,
+ 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b3d, 0x1c5f: 0x8b3d, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0xc1c1, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1f1, 0x1dc1: 0xc229, 0x1dc2: 0xc261, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc281, 0x1dd1: 0xc2a1,
+ 0x1dd2: 0xc2c1, 0x1dd3: 0xc2e1, 0x1dd4: 0xc301, 0x1dd5: 0xc321, 0x1dd6: 0xc341, 0x1dd7: 0xc361,
+ 0x1dd8: 0xc381, 0x1dd9: 0xc3a1, 0x1dda: 0xc3c1, 0x1ddb: 0xc3e1, 0x1ddc: 0xc401, 0x1ddd: 0xc421,
+ 0x1dde: 0xc441, 0x1ddf: 0xc461, 0x1de0: 0xc481, 0x1de1: 0xc4a1, 0x1de2: 0xc4c1, 0x1de3: 0xc4e1,
+ 0x1de4: 0xc501, 0x1de5: 0xc521, 0x1de6: 0xc541, 0x1de7: 0xc561, 0x1de8: 0xc581, 0x1de9: 0xc5a1,
+ 0x1dea: 0xc5c1, 0x1deb: 0xc5e1, 0x1dec: 0xc601, 0x1ded: 0xc621, 0x1dee: 0xc641, 0x1def: 0xc661,
+ 0x1df0: 0xc681, 0x1df1: 0xc6a1, 0x1df2: 0xc6c1, 0x1df3: 0xc6e1, 0x1df4: 0xc701, 0x1df5: 0xc721,
+ 0x1df6: 0xc741, 0x1df7: 0xc761, 0x1df8: 0xc781, 0x1df9: 0xc7a1, 0x1dfa: 0xc7c1, 0x1dfb: 0xc7e1,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcb11, 0x1e01: 0xcb31, 0x1e02: 0xcb51, 0x1e03: 0x8b55, 0x1e04: 0xcb71, 0x1e05: 0xcb91,
+ 0x1e06: 0xcbb1, 0x1e07: 0xcbd1, 0x1e08: 0xcbf1, 0x1e09: 0xcc11, 0x1e0a: 0xcc31, 0x1e0b: 0xcc51,
+ 0x1e0c: 0xcc71, 0x1e0d: 0x8b75, 0x1e0e: 0xcc91, 0x1e0f: 0xccb1, 0x1e10: 0xccd1, 0x1e11: 0xccf1,
+ 0x1e12: 0x8b95, 0x1e13: 0xcd11, 0x1e14: 0xcd31, 0x1e15: 0xc441, 0x1e16: 0x8bb5, 0x1e17: 0xcd51,
+ 0x1e18: 0xcd71, 0x1e19: 0xcd91, 0x1e1a: 0xcdb1, 0x1e1b: 0xcdd1, 0x1e1c: 0x8bd5, 0x1e1d: 0xcdf1,
+ 0x1e1e: 0xce11, 0x1e1f: 0xce31, 0x1e20: 0xce51, 0x1e21: 0xce71, 0x1e22: 0xc7a1, 0x1e23: 0xce91,
+ 0x1e24: 0xceb1, 0x1e25: 0xced1, 0x1e26: 0xcef1, 0x1e27: 0xcf11, 0x1e28: 0xcf31, 0x1e29: 0xcf51,
+ 0x1e2a: 0xcf71, 0x1e2b: 0xcf91, 0x1e2c: 0xcfb1, 0x1e2d: 0xcfd1, 0x1e2e: 0xcff1, 0x1e2f: 0xd011,
+ 0x1e30: 0xd031, 0x1e31: 0xd051, 0x1e32: 0xd051, 0x1e33: 0xd051, 0x1e34: 0x8bf5, 0x1e35: 0xd071,
+ 0x1e36: 0xd091, 0x1e37: 0xd0b1, 0x1e38: 0x8c15, 0x1e39: 0xd0d1, 0x1e3a: 0xd0f1, 0x1e3b: 0xd111,
+ 0x1e3c: 0xd131, 0x1e3d: 0xd151, 0x1e3e: 0xd171, 0x1e3f: 0xd191,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd1b1, 0x1e41: 0xd1d1, 0x1e42: 0xd1f1, 0x1e43: 0xd211, 0x1e44: 0xd231, 0x1e45: 0xd251,
+ 0x1e46: 0xd251, 0x1e47: 0xd271, 0x1e48: 0xd291, 0x1e49: 0xd2b1, 0x1e4a: 0xd2d1, 0x1e4b: 0xd2f1,
+ 0x1e4c: 0xd311, 0x1e4d: 0xd331, 0x1e4e: 0xd351, 0x1e4f: 0xd371, 0x1e50: 0xd391, 0x1e51: 0xd3b1,
+ 0x1e52: 0xd3d1, 0x1e53: 0xd3f1, 0x1e54: 0xd411, 0x1e55: 0xd431, 0x1e56: 0xd451, 0x1e57: 0xd471,
+ 0x1e58: 0xd491, 0x1e59: 0x8c35, 0x1e5a: 0xd4b1, 0x1e5b: 0xd4d1, 0x1e5c: 0xd4f1, 0x1e5d: 0xc321,
+ 0x1e5e: 0xd511, 0x1e5f: 0xd531, 0x1e60: 0x8c55, 0x1e61: 0x8c75, 0x1e62: 0xd551, 0x1e63: 0xd571,
+ 0x1e64: 0xd591, 0x1e65: 0xd5b1, 0x1e66: 0xd5d1, 0x1e67: 0xd5f1, 0x1e68: 0x2040, 0x1e69: 0xd611,
+ 0x1e6a: 0xd631, 0x1e6b: 0xd631, 0x1e6c: 0x8c95, 0x1e6d: 0xd651, 0x1e6e: 0xd671, 0x1e6f: 0xd691,
+ 0x1e70: 0xd6b1, 0x1e71: 0x8cb5, 0x1e72: 0xd6d1, 0x1e73: 0xd6f1, 0x1e74: 0x2040, 0x1e75: 0xd711,
+ 0x1e76: 0xd731, 0x1e77: 0xd751, 0x1e78: 0xd771, 0x1e79: 0xd791, 0x1e7a: 0xd7b1, 0x1e7b: 0x8cd5,
+ 0x1e7c: 0xd7d1, 0x1e7d: 0x8cf5, 0x1e7e: 0xd7f1, 0x1e7f: 0xd811,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd831, 0x1e81: 0xd851, 0x1e82: 0xd871, 0x1e83: 0xd891, 0x1e84: 0xd8b1, 0x1e85: 0xd8d1,
+ 0x1e86: 0xd8f1, 0x1e87: 0xd911, 0x1e88: 0xd931, 0x1e89: 0x8d15, 0x1e8a: 0xd951, 0x1e8b: 0xd971,
+ 0x1e8c: 0xd991, 0x1e8d: 0xd9b1, 0x1e8e: 0xd9d1, 0x1e8f: 0x8d35, 0x1e90: 0xd9f1, 0x1e91: 0x8d55,
+ 0x1e92: 0x8d75, 0x1e93: 0xda11, 0x1e94: 0xda31, 0x1e95: 0xda31, 0x1e96: 0xda51, 0x1e97: 0x8d95,
+ 0x1e98: 0x8db5, 0x1e99: 0xda71, 0x1e9a: 0xda91, 0x1e9b: 0xdab1, 0x1e9c: 0xdad1, 0x1e9d: 0xdaf1,
+ 0x1e9e: 0xdb11, 0x1e9f: 0xdb31, 0x1ea0: 0xdb51, 0x1ea1: 0xdb71, 0x1ea2: 0xdb91, 0x1ea3: 0xdbb1,
+ 0x1ea4: 0x8dd5, 0x1ea5: 0xdbd1, 0x1ea6: 0xdbf1, 0x1ea7: 0xdc11, 0x1ea8: 0xdc31, 0x1ea9: 0xdc11,
+ 0x1eaa: 0xdc51, 0x1eab: 0xdc71, 0x1eac: 0xdc91, 0x1ead: 0xdcb1, 0x1eae: 0xdcd1, 0x1eaf: 0xdcf1,
+ 0x1eb0: 0xdd11, 0x1eb1: 0xdd31, 0x1eb2: 0xdd51, 0x1eb3: 0xdd71, 0x1eb4: 0xdd91, 0x1eb5: 0xddb1,
+ 0x1eb6: 0xddd1, 0x1eb7: 0xddf1, 0x1eb8: 0x8df5, 0x1eb9: 0xde11, 0x1eba: 0xde31, 0x1ebb: 0xde51,
+ 0x1ebc: 0xde71, 0x1ebd: 0xde91, 0x1ebe: 0x8e15, 0x1ebf: 0xdeb1,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe5b1, 0x1ec1: 0xe5d1, 0x1ec2: 0xe5f1, 0x1ec3: 0xe611, 0x1ec4: 0xe631, 0x1ec5: 0xe651,
+ 0x1ec6: 0x8f35, 0x1ec7: 0xe671, 0x1ec8: 0xe691, 0x1ec9: 0xe6b1, 0x1eca: 0xe6d1, 0x1ecb: 0xe6f1,
+ 0x1ecc: 0xe711, 0x1ecd: 0x8f55, 0x1ece: 0xe731, 0x1ecf: 0xe751, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95,
+ 0x1ed2: 0xe771, 0x1ed3: 0xe791, 0x1ed4: 0xe7b1, 0x1ed5: 0xe7d1, 0x1ed6: 0xe7f1, 0x1ed7: 0xe811,
+ 0x1ed8: 0xe831, 0x1ed9: 0xe851, 0x1eda: 0xe871, 0x1edb: 0x8fb5, 0x1edc: 0xe891, 0x1edd: 0x8fd5,
+ 0x1ede: 0xe8b1, 0x1edf: 0x2040, 0x1ee0: 0xe8d1, 0x1ee1: 0xe8f1, 0x1ee2: 0xe911, 0x1ee3: 0x8ff5,
+ 0x1ee4: 0xe931, 0x1ee5: 0xe951, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0xe971, 0x1ee9: 0xe991,
+ 0x1eea: 0xe9b1, 0x1eeb: 0xe9d1, 0x1eec: 0xe9f1, 0x1eed: 0xe9f1, 0x1eee: 0xea11, 0x1eef: 0xea31,
+ 0x1ef0: 0xea51, 0x1ef1: 0xea71, 0x1ef2: 0xea91, 0x1ef3: 0xeab1, 0x1ef4: 0xead1, 0x1ef5: 0x9055,
+ 0x1ef6: 0xeaf1, 0x1ef7: 0x9075, 0x1ef8: 0xeb11, 0x1ef9: 0x9095, 0x1efa: 0xeb31, 0x1efb: 0x90b5,
+ 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0xeb51, 0x1eff: 0xeb71,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb91, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0xebb1,
+ 0x1f06: 0xebd1, 0x1f07: 0xebd1, 0x1f08: 0xebf1, 0x1f09: 0xec11, 0x1f0a: 0xec31, 0x1f0b: 0xec51,
+ 0x1f0c: 0xec71, 0x1f0d: 0x9195, 0x1f0e: 0xec91, 0x1f0f: 0xecb1, 0x1f10: 0xecd1, 0x1f11: 0xecf1,
+ 0x1f12: 0x91b5, 0x1f13: 0xed11, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0xed31, 0x1f17: 0xed51,
+ 0x1f18: 0xed71, 0x1f19: 0xed91, 0x1f1a: 0xedb1, 0x1f1b: 0xedd1, 0x1f1c: 0x9215, 0x1f1d: 0x9235,
+ 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0xedf1, 0x1f21: 0x9275, 0x1f22: 0xee11, 0x1f23: 0xee31,
+ 0x1f24: 0xee51, 0x1f25: 0x9295, 0x1f26: 0xee71, 0x1f27: 0xee91, 0x1f28: 0xeeb1, 0x1f29: 0xeed1,
+ 0x1f2a: 0xeef1, 0x1f2b: 0x92b5, 0x1f2c: 0xef11, 0x1f2d: 0xef31, 0x1f2e: 0xef51, 0x1f2f: 0xef71,
+ 0x1f30: 0xef91, 0x1f31: 0xefb1, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0xefd1, 0x1f35: 0x9315,
+ 0x1f36: 0xeff1, 0x1f37: 0x9335, 0x1f38: 0xf011, 0x1f39: 0xf031, 0x1f3a: 0xf051, 0x1f3b: 0x9355,
+ 0x1f3c: 0x9375, 0x1f3d: 0xf071, 0x1f3e: 0x9395, 0x1f3f: 0xf091,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6d1, 0x1f41: 0xf6f1, 0x1f42: 0xf711, 0x1f43: 0xf731, 0x1f44: 0xf751, 0x1f45: 0x9555,
+ 0x1f46: 0xf771, 0x1f47: 0xf791, 0x1f48: 0xf7b1, 0x1f49: 0xf7d1, 0x1f4a: 0xf7f1, 0x1f4b: 0x9575,
+ 0x1f4c: 0x9595, 0x1f4d: 0xf811, 0x1f4e: 0xf831, 0x1f4f: 0xf851, 0x1f50: 0xf871, 0x1f51: 0xf891,
+ 0x1f52: 0xf8b1, 0x1f53: 0x95b5, 0x1f54: 0xf8d1, 0x1f55: 0xf8f1, 0x1f56: 0xf911, 0x1f57: 0xf931,
+ 0x1f58: 0x95d5, 0x1f59: 0x95f5, 0x1f5a: 0xf951, 0x1f5b: 0xf971, 0x1f5c: 0xf991, 0x1f5d: 0x9615,
+ 0x1f5e: 0xf9b1, 0x1f5f: 0xf9d1, 0x1f60: 0x684d, 0x1f61: 0x9635, 0x1f62: 0xf9f1, 0x1f63: 0xfa11,
+ 0x1f64: 0xfa31, 0x1f65: 0x9655, 0x1f66: 0xfa51, 0x1f67: 0xfa71, 0x1f68: 0xfa91, 0x1f69: 0xfab1,
+ 0x1f6a: 0xfad1, 0x1f6b: 0xfaf1, 0x1f6c: 0xfb11, 0x1f6d: 0x9675, 0x1f6e: 0xfb31, 0x1f6f: 0xfb51,
+ 0x1f70: 0xfb71, 0x1f71: 0x9695, 0x1f72: 0xfb91, 0x1f73: 0xfbb1, 0x1f74: 0xfbd1, 0x1f75: 0xfbf1,
+ 0x1f76: 0x7b6d, 0x1f77: 0x96b5, 0x1f78: 0xfc11, 0x1f79: 0xfc31, 0x1f7a: 0xfc51, 0x1f7b: 0x96d5,
+ 0x1f7c: 0xfc71, 0x1f7d: 0x96f5, 0x1f7e: 0xfc91, 0x1f7f: 0xfc91,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfcb1, 0x1f81: 0x9715, 0x1f82: 0xfcd1, 0x1f83: 0xfcf1, 0x1f84: 0xfd11, 0x1f85: 0xfd31,
+ 0x1f86: 0xfd51, 0x1f87: 0xfd71, 0x1f88: 0xfd91, 0x1f89: 0x9735, 0x1f8a: 0xfdb1, 0x1f8b: 0xfdd1,
+ 0x1f8c: 0xfdf1, 0x1f8d: 0xfe11, 0x1f8e: 0xfe31, 0x1f8f: 0xfe51, 0x1f90: 0x9755, 0x1f91: 0xfe71,
+ 0x1f92: 0x9775, 0x1f93: 0x9795, 0x1f94: 0x97b5, 0x1f95: 0xfe91, 0x1f96: 0xfeb1, 0x1f97: 0xfed1,
+ 0x1f98: 0xfef1, 0x1f99: 0xff11, 0x1f9a: 0xff31, 0x1f9b: 0xff51, 0x1f9c: 0xff71, 0x1f9d: 0x97d5,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0x9b,
+ 0x1b0: 0xd0, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4,
+ 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe0,
+ 0x1c8: 0xe1, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe2,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe5, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe6, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe7,
+ 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef,
+ 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf8, 0x31f: 0xf9,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd,
+ 0x3a8: 0x47, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x100, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x101, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9f, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d,
+ 0x3d0: 0x10e, 0x3d1: 0x9f, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xba, 0x3e6: 0x11a, 0x3e7: 0x11b,
+ 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x5b, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x121, 0x3f1: 0x122, 0x3f2: 0x123, 0x3f3: 0x124, 0x3f4: 0x125, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x127, 0x3fd: 0x128, 0x3fe: 0xba, 0x3ff: 0x129,
+ // Block 0x10, offset 0x400
+ 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131,
+ 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a,
+ 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0x143, 0x427: 0x144,
+ 0x428: 0x145, 0x429: 0x146, 0x42a: 0x147, 0x42b: 0x148, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x149, 0x431: 0x14a, 0x432: 0x14b, 0x433: 0xba, 0x434: 0x14c, 0x435: 0x14d, 0x436: 0x14e, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14f, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0x150,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x151, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x152, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x153, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x154, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x155, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x148, 0x529: 0x156, 0x52a: 0xba, 0x52b: 0x157, 0x52c: 0x158, 0x52d: 0x159, 0x52e: 0x15a, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0x15b, 0x53a: 0x15c, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15d, 0x53e: 0x15e, 0x53f: 0x15f,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x160,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x161, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x162, 0x585: 0x163, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x164, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x165, 0x5b2: 0x166, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x167, 0x5c4: 0x168, 0x5c5: 0x169, 0x5c6: 0x16a, 0x5c7: 0x16b,
+ 0x5c8: 0x9b, 0x5c9: 0x16c, 0x5ca: 0xba, 0x5cb: 0x16d, 0x5cc: 0x9b, 0x5cd: 0x16e, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x16f, 0x5e9: 0x170, 0x5ea: 0x171, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x172, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0x173, 0x605: 0x174, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0x175, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x121, 0x621: 0x121, 0x622: 0x121, 0x623: 0x176, 0x624: 0x6f, 0x625: 0x177, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0x178, 0x632: 0x179, 0x633: 0xba, 0x634: 0x17a, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x17b, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x17c, 0x641: 0x9b, 0x642: 0x17d, 0x643: 0x17e, 0x644: 0x73, 0x645: 0x74, 0x646: 0x17f, 0x647: 0x180,
+ 0x648: 0x75, 0x649: 0x181, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x182, 0x65c: 0x9b, 0x65d: 0x183, 0x65e: 0x9b, 0x65f: 0x184,
+ 0x660: 0x185, 0x661: 0x186, 0x662: 0x187, 0x663: 0xba, 0x664: 0x188, 0x665: 0x189, 0x666: 0x18a, 0x667: 0x18b,
+ 0x668: 0x9b, 0x669: 0x18c, 0x66a: 0x18d, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x18e, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x18f, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x190, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x191, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x192,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x193, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x194, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x195, 0x841: 0x196, 0x842: 0xba, 0x843: 0xba, 0x844: 0x197, 0x845: 0x197, 0x846: 0x197, 0x847: 0x198,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 284 entries, 568 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x26c, 0x27d, 0x281, 0x28c, 0x290, 0x299, 0x2a1, 0x2a7, 0x2ac, 0x2af, 0x2b3, 0x2b9, 0x2bd, 0x2c1, 0x2c5, 0x2cb, 0x2d3, 0x2da, 0x2e5, 0x2ef, 0x2f3, 0x2f6, 0x2fc, 0x300, 0x302, 0x305, 0x307, 0x30a, 0x314, 0x317, 0x326, 0x32a, 0x32f, 0x332, 0x336, 0x33b, 0x340, 0x346, 0x352, 0x361, 0x367, 0x36b, 0x37a, 0x37f, 0x387, 0x391, 0x39c, 0x3a4, 0x3b5, 0x3be, 0x3ce, 0x3db, 0x3e5, 0x3ea, 0x3f7, 0x3fb, 0x400, 0x402, 0x406, 0x408, 0x40c, 0x415, 0x41b, 0x41f, 0x42f, 0x439, 0x43e, 0x441, 0x447, 0x44e, 0x453, 0x457, 0x45d, 0x462, 0x46b, 0x470, 0x476, 0x47d, 0x484, 0x48b, 0x48f, 0x494, 0x497, 0x49c, 0x4a8, 0x4ae, 0x4b3, 0x4ba, 0x4c2, 0x4c7, 0x4cb, 0x4db, 0x4e2, 0x4e6, 0x4ea, 0x4f1, 0x4f3, 0x4f6, 0x4f9, 0x4fd, 0x506, 0x50a, 0x512, 0x51a, 0x51e, 0x524, 0x52d, 0x539, 0x540, 0x549, 0x553, 0x55a, 0x568, 0x575, 0x582, 0x58b, 0x58f, 0x59f, 0x5a7, 0x5b2, 0x5bb, 0x5c1, 0x5c9, 0x5d2, 0x5dd, 0x5e0, 0x5ec, 0x5f5, 0x5f8, 0x5fd, 0x602, 0x60f, 0x61a, 0x623, 0x62d, 0x630, 0x63a, 0x643, 0x64f, 0x65c, 0x669, 0x677, 0x67e, 0x682, 0x685, 0x68a, 0x68d, 0x692, 0x695, 0x69c, 0x6a3, 0x6a7, 0x6b2, 0x6b5, 0x6b8, 0x6bb, 0x6c1, 0x6c7, 0x6cd, 0x6d0, 0x6d3, 0x6d6, 0x6dd, 0x6e0, 0x6e5, 0x6ef, 0x6f2, 0x6f6, 0x705, 0x711, 0x715, 0x71a, 0x71e, 0x723, 0x727, 0x72c, 0x735, 0x740, 0x746, 0x74c, 0x752, 0x758, 0x761, 0x764, 0x767, 0x76b, 0x76f, 0x773, 0x779, 0x77f, 0x784, 0x787, 0x797, 0x79e, 0x7a1, 0x7a6, 0x7aa, 0x7b0, 0x7b5, 0x7b9, 0x7bf, 0x7c5, 0x7c9, 0x7d2, 0x7d7, 0x7da, 0x7dd, 0x7e1, 0x7e5, 0x7e8, 0x7f8, 0x809, 0x80e, 0x810, 0x812}
+
+// idnaSparseValues: 2069 entries, 8276 bytes
+var idnaSparseValues = [2069]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x86
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x224
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x246
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x252
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25f
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x059d, lo: 0x90, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x059d, lo: 0xbd, hi: 0xbf},
+ // Block 0x45, offset 0x26c
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x46, offset 0x27d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x281
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x28c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x290
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x299
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x2a1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09dd, lo: 0xa9, hi: 0xa9},
+ {value: 0x09fd, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2ac
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2af
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e7e, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e9e, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2c1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xbf},
+ // Block 0x53, offset 0x2c5
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ebd, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x54, offset 0x2cb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2d3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x56, offset 0x2da
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2e5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x58, offset 0x2ef
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x59, offset 0x2f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0x5a, offset 0x2f6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0ef5, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x2fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0f15, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5c, offset 0x300
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f35, lo: 0x80, hi: 0xbf},
+ // Block 0x5d, offset 0x302
+ {value: 0x0020, lo: 0x02},
+ {value: 0x1735, lo: 0x80, hi: 0x8f},
+ {value: 0x1915, lo: 0x90, hi: 0xbf},
+ // Block 0x5e, offset 0x305
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1f15, lo: 0x80, hi: 0xbf},
+ // Block 0x5f, offset 0x307
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x60, offset 0x30a
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x61, offset 0x314
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x62, offset 0x317
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a35, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a55, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a75, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a95, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a75, lo: 0xb5, hi: 0xb5},
+ {value: 0x2ab5, lo: 0xb6, hi: 0xb6},
+ {value: 0x2ad5, lo: 0xb7, hi: 0xb7},
+ {value: 0x2af5, lo: 0xb8, hi: 0xb9},
+ {value: 0x2b15, lo: 0xba, hi: 0xbb},
+ {value: 0x2b35, lo: 0xbc, hi: 0xbd},
+ {value: 0x2b15, lo: 0xbe, hi: 0xbf},
+ // Block 0x63, offset 0x326
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x64, offset 0x32a
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x65, offset 0x32f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x66, offset 0x332
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x67, offset 0x336
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x68, offset 0x33b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x69, offset 0x340
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6a, offset 0x346
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0xe00d, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x83},
+ {value: 0x03f5, lo: 0x84, hi: 0x84},
+ {value: 0x1329, lo: 0x85, hi: 0x85},
+ {value: 0x447d, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6b, offset 0x352
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x361
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6d, offset 0x367
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6e, offset 0x36b
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x6f, offset 0x37a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x70, offset 0x37f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x71, offset 0x387
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x72, offset 0x391
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x73, offset 0x39c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x74, offset 0x3a4
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x75, offset 0x3b5
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3be
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x77, offset 0x3ce
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x78, offset 0x3db
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x449d, lo: 0x9c, hi: 0x9c},
+ {value: 0x44b5, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x44cd, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3e5
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44ed, lo: 0x80, hi: 0x8f},
+ {value: 0x450d, lo: 0x90, hi: 0x9f},
+ {value: 0x452d, lo: 0xa0, hi: 0xaf},
+ {value: 0x450d, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3ea
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7b, offset 0x3f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3fb
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7d, offset 0x400
+ {value: 0x0020, lo: 0x01},
+ {value: 0x454d, lo: 0x80, hi: 0xbf},
+ // Block 0x7e, offset 0x402
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d4d, lo: 0x80, hi: 0x94},
+ {value: 0x4b0d, lo: 0x95, hi: 0x95},
+ {value: 0x4fed, lo: 0x96, hi: 0xbf},
+ // Block 0x7f, offset 0x406
+ {value: 0x0020, lo: 0x01},
+ {value: 0x552d, lo: 0x80, hi: 0xbf},
+ // Block 0x80, offset 0x408
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5d2d, lo: 0x80, hi: 0x84},
+ {value: 0x568d, lo: 0x85, hi: 0x85},
+ {value: 0x5dcd, lo: 0x86, hi: 0xbf},
+ // Block 0x81, offset 0x40c
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b8d, lo: 0x80, hi: 0x8f},
+ {value: 0x6d4d, lo: 0x90, hi: 0x90},
+ {value: 0x6d8d, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70ed, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x710d, lo: 0xb0, hi: 0xbf},
+ // Block 0x82, offset 0x415
+ {value: 0x0020, lo: 0x05},
+ {value: 0x730d, lo: 0x80, hi: 0xad},
+ {value: 0x656d, lo: 0xae, hi: 0xae},
+ {value: 0x78cd, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f8d, lo: 0xb6, hi: 0xb6},
+ {value: 0x79ad, lo: 0xb7, hi: 0xbf},
+ // Block 0x83, offset 0x41b
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x84, offset 0x41f
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x85, offset 0x42f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x86, offset 0x439
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x87, offset 0x43e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x88, offset 0x441
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x89, offset 0x447
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8a, offset 0x44e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8b, offset 0x453
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8c, offset 0x457
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8d, offset 0x45d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8e, offset 0x462
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8f, offset 0x46b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x90, offset 0x470
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x91, offset 0x476
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8b0d, lo: 0x98, hi: 0x9f},
+ {value: 0x8b25, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x92, offset 0x47d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8b25, lo: 0xb0, hi: 0xb7},
+ {value: 0x8b0d, lo: 0xb8, hi: 0xbf},
+ // Block 0x93, offset 0x484
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x94, offset 0x48b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x95, offset 0x48f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x494
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x97, offset 0x497
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x98, offset 0x49c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x99, offset 0x4a8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4ae
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9b, offset 0x4b3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9c, offset 0x4ba
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9d, offset 0x4c2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9e, offset 0x4c7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0x9f, offset 0x4cb
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4db
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa1, offset 0x4e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa2, offset 0x4e6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa3, offset 0x4ea
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa4, offset 0x4f1
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa5, offset 0x4f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa6, offset 0x4f6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa7, offset 0x4f9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa8, offset 0x4fd
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x506
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xaa, offset 0x50a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xab, offset 0x512
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xac, offset 0x51a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xad, offset 0x51e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xae, offset 0x524
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xaf, offset 0x52d
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb0, offset 0x539
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb1, offset 0x540
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb2, offset 0x549
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb3, offset 0x553
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x55a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb5, offset 0x568
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb6, offset 0x575
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb7, offset 0x582
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb8, offset 0x58b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb9, offset 0x58f
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xba, offset 0x59f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x5a7
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbc, offset 0x5b2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5bb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbe, offset 0x5c1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5c9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc0, offset 0x5d2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc1, offset 0x5dd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc2, offset 0x5e0
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5ec
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc4, offset 0x5f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc5, offset 0x5f8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5fd
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xc7, offset 0x602
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x99},
+ {value: 0x3308, lo: 0x9a, hi: 0x9b},
+ {value: 0x3008, lo: 0x9c, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xbf},
+ // Block 0xc8, offset 0x60f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc9, offset 0x61a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xca, offset 0x623
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xcb, offset 0x62d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xcc, offset 0x630
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xcd, offset 0x63a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xce, offset 0x643
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xcf, offset 0x64f
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xd0, offset 0x65c
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xd1, offset 0x669
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd2, offset 0x677
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd3, offset 0x67e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xd4, offset 0x682
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xd5, offset 0x685
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xd6, offset 0x68a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xd7, offset 0x68d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0340, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd8, offset 0x692
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd9, offset 0x695
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xda, offset 0x69c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xdb, offset 0x6a3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xdc, offset 0x6a7
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xdd, offset 0x6b2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xde, offset 0x6b5
+ {value: 0x0000, lo: 0x02},
+ {value: 0xe105, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xdf, offset 0x6b8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xe0, offset 0x6bb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbf},
+ // Block 0xe1, offset 0x6c1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe2, offset 0x6c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xe3, offset 0x6cd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xe4, offset 0x6d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe5, offset 0x6d3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe6, offset 0x6d6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xa3},
+ {value: 0x0008, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe7, offset 0x6dd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xe8, offset 0x6e0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xe9, offset 0x6e5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xea, offset 0x6ef
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xeb, offset 0x6f2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xec, offset 0x6f6
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xed, offset 0x705
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xee, offset 0x711
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xef, offset 0x715
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xf0, offset 0x71a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf1, offset 0x71e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xf2, offset 0x723
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xf3, offset 0x727
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xf4, offset 0x72c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xf5, offset 0x735
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xf6, offset 0x740
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xf7, offset 0x746
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xf8, offset 0x74c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xf9, offset 0x752
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xfa, offset 0x758
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0b08, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xfb, offset 0x761
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0xfc, offset 0x764
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xfd, offset 0x767
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0818, lo: 0x81, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xfe, offset 0x76b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xff, offset 0x76f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x100, offset 0x773
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x101, offset 0x779
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x102, offset 0x77f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1d9, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0x103, offset 0x784
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0x104, offset 0x787
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc801, lo: 0x80, hi: 0x80},
+ {value: 0xc851, lo: 0x81, hi: 0x81},
+ {value: 0xc8a1, lo: 0x82, hi: 0x82},
+ {value: 0xc8f1, lo: 0x83, hi: 0x83},
+ {value: 0xc941, lo: 0x84, hi: 0x84},
+ {value: 0xc991, lo: 0x85, hi: 0x85},
+ {value: 0xc9e1, lo: 0x86, hi: 0x86},
+ {value: 0xca31, lo: 0x87, hi: 0x87},
+ {value: 0xca81, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcad1, lo: 0x90, hi: 0x90},
+ {value: 0xcaf1, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0x105, offset 0x797
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x106, offset 0x79e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x107, offset 0x7a1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x108, offset 0x7a6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x109, offset 0x7aa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x10a, offset 0x7b0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x10b, offset 0x7b5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x10c, offset 0x7b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0x10d, offset 0x7bf
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xa4},
+ {value: 0x0018, lo: 0xa5, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xbf},
+ // Block 0x10e, offset 0x7c5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x10f, offset 0x7c9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x110, offset 0x7d2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x111, offset 0x7d7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x112, offset 0x7da
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x113, offset 0x7dd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x114, offset 0x7e1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x115, offset 0x7e5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x116, offset 0x7e8
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xded1, lo: 0x80, hi: 0x89},
+ {value: 0x8e35, lo: 0x8a, hi: 0x8a},
+ {value: 0xe011, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e55, lo: 0x9d, hi: 0x9d},
+ {value: 0xe251, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e75, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2f1, lo: 0xa4, hi: 0xab},
+ {value: 0x7f0d, lo: 0xac, hi: 0xac},
+ {value: 0xe3f1, lo: 0xad, hi: 0xaf},
+ {value: 0x8e95, lo: 0xb0, hi: 0xb0},
+ {value: 0xe451, lo: 0xb1, hi: 0xb6},
+ {value: 0x8eb5, lo: 0xb7, hi: 0xb9},
+ {value: 0xe511, lo: 0xba, hi: 0xba},
+ {value: 0x8f15, lo: 0xbb, hi: 0xbb},
+ {value: 0xe531, lo: 0xbc, hi: 0xbf},
+ // Block 0x117, offset 0x7f8
+ {value: 0x0020, lo: 0x10},
+ {value: 0x93b5, lo: 0x80, hi: 0x80},
+ {value: 0xf0b1, lo: 0x81, hi: 0x86},
+ {value: 0x93d5, lo: 0x87, hi: 0x8a},
+ {value: 0xda11, lo: 0x8b, hi: 0x8b},
+ {value: 0xf171, lo: 0x8c, hi: 0x96},
+ {value: 0x9455, lo: 0x97, hi: 0x97},
+ {value: 0xf2d1, lo: 0x98, hi: 0xa3},
+ {value: 0x9475, lo: 0xa4, hi: 0xa6},
+ {value: 0xf451, lo: 0xa7, hi: 0xaa},
+ {value: 0x94d5, lo: 0xab, hi: 0xab},
+ {value: 0xf4d1, lo: 0xac, hi: 0xac},
+ {value: 0x94f5, lo: 0xad, hi: 0xad},
+ {value: 0xf4f1, lo: 0xae, hi: 0xaf},
+ {value: 0x9515, lo: 0xb0, hi: 0xb1},
+ {value: 0xf531, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x118, offset 0x809
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x119, offset 0x80e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x11a, offset 0x810
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x11b, offset 0x812
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42780 bytes (41KiB); checksum: 29936AB9
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
index 685f0e7e..dc5225b6 100644
--- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -403,9 +403,9 @@ func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, resu
// Where should scanning start?
if dstStart.After(srcStart) {
- advance := dstStart.Sub(srcStart) / srcInterval
- srcIndex += int(advance)
- srcStart = srcStart.Add(advance * srcInterval)
+ advance := int(dstStart.Sub(srcStart) / srcInterval)
+ srcIndex += advance
+ srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
}
// The i'th value is computed as show below.
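
The timeseries.go hunk above swaps Duration-typed index arithmetic for an explicit integer step count. As a minimal standalone sketch of that idiom (the concrete interval, srcStart, and dstStart values below are illustrative only and do not come from the vendored code): dividing two time.Duration values yields a unitless quotient, which is clearer when held as an int and scaled back with time.Duration(n) before calling Add.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values only; the vendored code derives these from its
	// time-series buckets.
	interval := 10 * time.Second
	srcStart := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	dstStart := srcStart.Add(35 * time.Second)

	// Number of whole intervals between srcStart and dstStart, kept as an
	// int rather than a time.Duration.
	advance := int(dstStart.Sub(srcStart) / interval) // 3

	// Scale the count back to a Duration explicitly before advancing.
	srcStart = srcStart.Add(time.Duration(advance) * interval)

	fmt.Println(advance, srcStart) // 3 2020-01-01 00:00:30 +0000 UTC
}
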
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 18b0f166..40adb1bb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -19,9 +19,10 @@ github.com/apparentlymart/go-textseg/textseg
# github.com/armon/go-radix v1.0.0
## explicit
github.com/armon/go-radix
-# github.com/aws/aws-sdk-go v1.25.3
-## explicit
+# github.com/aws/aws-sdk-go v1.34.0
+## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
+github.com/aws/aws-sdk-go/aws/arn
github.com/aws/aws-sdk-go/aws/awserr
github.com/aws/aws-sdk-go/aws/awsutil
github.com/aws/aws-sdk-go/aws/client
@@ -39,6 +40,7 @@ github.com/aws/aws-sdk-go/aws/endpoints
github.com/aws/aws-sdk-go/aws/request
github.com/aws/aws-sdk-go/aws/session
github.com/aws/aws-sdk-go/aws/signer/v4
+github.com/aws/aws-sdk-go/internal/context
github.com/aws/aws-sdk-go/internal/ini
github.com/aws/aws-sdk-go/internal/s3err
github.com/aws/aws-sdk-go/internal/sdkio
@@ -46,6 +48,9 @@ github.com/aws/aws-sdk-go/internal/sdkmath
github.com/aws/aws-sdk-go/internal/sdkrand
github.com/aws/aws-sdk-go/internal/sdkuri
github.com/aws/aws-sdk-go/internal/shareddefaults
+github.com/aws/aws-sdk-go/internal/strings
+github.com/aws/aws-sdk-go/internal/sync/singleflight
+github.com/aws/aws-sdk-go/private/checksum
github.com/aws/aws-sdk-go/private/protocol
github.com/aws/aws-sdk-go/private/protocol/eventstream
github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
@@ -56,6 +61,7 @@ github.com/aws/aws-sdk-go/private/protocol/rest
github.com/aws/aws-sdk-go/private/protocol/restxml
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
github.com/aws/aws-sdk-go/service/s3
+github.com/aws/aws-sdk-go/service/s3/internal/arn
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
@@ -208,8 +214,8 @@ github.com/hashicorp/terraform-svchost/disco
# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
## explicit
github.com/hashicorp/yamux
-# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
-## explicit
+# github.com/jmespath/go-jmespath v0.3.0
+## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/konsorten/go-windows-terminal-sequences v1.0.1
## explicit
@@ -247,7 +253,7 @@ github.com/mitchellh/reflectwalk
# github.com/oklog/run v1.0.0
## explicit
github.com/oklog/run
-# github.com/pkg/errors v0.8.0
+# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/posener/complete v1.2.1
@@ -352,7 +358,7 @@ golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/poly1305
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/terminal
-# golang.org/x/net v0.0.0-20191009170851-d66e71096ffb
+# golang.org/x/net v0.0.0-20200202094626-16171245cfb2
## explicit; go 1.11
golang.org/x/net/context
golang.org/x/net/context/ctxhttp