From d4c7ca39fd3c385674cd36ecfb0e6d5870c44534 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Wed, 8 May 2024 13:51:48 +0200
Subject: [PATCH 1/2] update c/{buildah,common,image,storage} to latest main

Signed-off-by: Paul Holzinger
---
 go.mod | 57 +-
 go.sum | 285 +-
 .../computestorage/zsyscall_windows.go | 27 +-
 .../Microsoft/hcsshim/internal/hcs/utils.go | 4 +-
 .../hcsshim/internal/hns/zsyscall_windows.go | 5 +-
 .../internal/interop/zsyscall_windows.go | 5 +-
 .../internal/security/zsyscall_windows.go | 9 +-
 .../internal/vmcompute/zsyscall_windows.go | 55 +-
 .../internal/wclayer/zsyscall_windows.go | 45 +-
 .../internal/winapi/zsyscall_windows.go | 115 +-
 .../Microsoft/hcsshim/zsyscall_windows.go | 5 +-
 .../github.com/containers/buildah/.cirrus.yml | 2 +-
 .../containers/buildah/.packit.yaml | 2 -
 vendor/github.com/containers/buildah/Makefile | 15 +-
 .../github.com/containers/buildah/README.md | 2 +
 .../github.com/containers/buildah/digester.go | 9 +-
 vendor/github.com/containers/buildah/image.go | 62 +-
 .../containers/buildah/run_linux.go | 54 +-
 .../containers/common/internal/deepcopy.go | 3 +-
 .../common/libimage/manifest_list.go | 2 +-
 .../common/libimage/manifests/manifests.go | 2 +-
 .../containers/common/libimage/pull.go | 2 +-
 .../containers/common/libimage/runtime.go | 2 +-
 .../common/libnetwork/cni/cni_conversion.go | 2 +-
 .../common/libnetwork/cni/config.go | 2 +-
 .../common/libnetwork/etchosts/hosts.go | 2 +-
 .../common/libnetwork/internal/util/bridge.go | 2 +-
 .../common/libnetwork/internal/util/util.go | 2 +-
 .../common/libnetwork/netavark/config.go | 2 +-
 .../common/libnetwork/netavark/run.go | 2 +-
 .../common/libnetwork/pasta/pasta_linux.go | 191 +-
 .../common/libnetwork/resolvconf/resolv.go | 2 +-
 .../common/libnetwork/util/filters.go | 2 +-
 .../common/pkg/capabilities/capabilities.go | 2 +-
 .../containers/common/pkg/config/config.go | 9 +-
 .../common/pkg/config/config_unix.go | 10 +
 .../common/pkg/config/config_windows.go | 15 +-
 .../containers/common/pkg/config/new.go | 12 +-
 .../common/pkg/manifests/manifests.go | 2 +-
 .../common/pkg/secrets/define/secrets.go | 16 +
 .../pkg/secrets/filedriver/filedriver.go | 13 +-
 .../pkg/secrets/passdriver/passdriver.go | 27 +-
 .../containers/common/pkg/secrets/secrets.go | 13 +-
 .../common/pkg/secrets/secretsdb.go | 14 +-
 .../pkg/secrets/shelldriver/shelldriver.go | 25 +-
 .../common/pkg/supplemented/supplemented.go | 2 +-
 .../containers/common/pkg/util/util.go | 2 +-
 .../containers/image/v5/copy/compression.go | 2 +-
 .../containers/image/v5/copy/copy.go | 2 +-
 .../containers/image/v5/copy/encryption.go | 4 +-
 .../containers/image/v5/copy/manifest.go | 2 +-
 .../containers/image/v5/copy/multiple.go | 4 +-
 .../containers/image/v5/copy/single.go | 4 +-
 .../image/v5/docker/distribution_error.go | 2 +-
 .../image/v5/docker/docker_client.go | 5 +-
 .../image/v5/docker/docker_image_dest.go | 4 +-
 .../v5/docker/internal/tarfile/writer.go | 2 +-
 .../containers/image/v5/internal/image/oci.go | 2 +-
 .../internal/manifest/docker_schema2_list.go | 2 +-
 .../image/v5/internal/manifest/manifest.go | 2 +-
 .../image/v5/internal/manifest/oci_index.go | 4 +-
 .../internal/pkg/platform/platform_matcher.go | 2 +-
 .../image/v5/internal/signature/sigstore.go | 3 +-
 .../image/v5/manifest/docker_schema1.go | 2 +-
 .../containers/image/v5/manifest/oci.go | 2 +-
 .../image/v5/oci/layout/oci_delete.go | 2 +-
 .../image/v5/oci/layout/oci_dest.go | 2 +-
 .../image/v5/openshift/openshift-copies.go | 2 +-
 .../image/v5/openshift/openshift_dest.go | 2 +-
 .../containers/image/v5/pkg/blobcache/dest.go | 78 +-
 .../internal/prioritize/prioritize.go | 22 +-
 .../image/v5/pkg/shortnames/shortnames.go | 2 +-
 .../v5/pkg/sysregistriesv2/shortnames.go | 2 +-
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 2 +-
 .../containers/image/v5/signature/docker.go | 2 +-
 .../image/v5/signature/fulcio_cert.go | 2 +-
 .../image/v5/signature/internal/rekor_set.go | 2 +-
 .../v5/signature/policy_eval_signedby.go | 2 +-
 .../image/v5/storage/storage_dest.go | 3 +-
 .../image/v5/storage/storage_reference.go | 2 +-
 .../image/v5/tarball/tarball_reference.go | 2 +-
 .../image/v5/tarball/tarball_src.go | 2 +-
 .../github.com/containers/storage/.cirrus.yml | 8 +-
 vendor/github.com/containers/storage/Makefile | 2 +-
 .../storage/drivers/devmapper/device_setup.go | 254 --
 .../storage/drivers/devmapper/deviceset.go | 2889 -----------------
 .../drivers/devmapper/devmapper_doc.go | 109 -
 .../storage/drivers/devmapper/driver.go | 272 --
 .../storage/drivers/devmapper/jsoniter.go | 8 -
 .../storage/drivers/devmapper/mount.go | 89 -
 .../storage/drivers/driver_linux.go | 2 -
 .../storage/drivers/overlay/composefs.go | 12 +-
 .../drivers/register/register_devicemapper.go | 9 -
 vendor/github.com/containers/storage/idset.go | 30 +-
 .../storage/pkg/chunked/bloom_filter.go | 87 +
 .../storage/pkg/chunked/cache_linux.go | 400 ++-
 .../storage/pkg/chunked/compression_linux.go | 100 +-
 .../storage/pkg/chunked/dump/dump.go | 2 +-
 .../pkg/chunked/internal/compression.go | 57 +-
 .../storage/pkg/chunked/storage_linux.go | 75 +-
 .../containers/storage/pkg/config/config.go | 125 -
 .../storage/pkg/devicemapper/devmapper.go | 813 -----
 .../storage/pkg/devicemapper/devmapper_log.go | 123 -
 .../pkg/devicemapper/devmapper_wrapper.go | 252 --
 .../devmapper_wrapper_deferred_remove.go | 32 -
 .../devicemapper/devmapper_wrapper_dynamic.go | 7 -
 .../devmapper_wrapper_no_deferred_remove.go | 16 -
 .../devicemapper/devmapper_wrapper_static.go | 7 -
 .../storage/pkg/devicemapper/ioctl.go | 29 -
 .../storage/pkg/devicemapper/log.go | 11 -
 .../storage/pkg/dmesg/dmesg_linux.go | 21 -
 .../storage/pkg/parsers/kernel/kernel_unix.go | 24 +-
 .../pkg/parsers/kernel/uname_freebsd.go | 17 -
 .../storage/pkg/parsers/kernel/uname_linux.go | 17 -
 .../pkg/parsers/kernel/uname_solaris.go | 14 -
 .../pkg/parsers/kernel/uname_unsupported.go | 14 -
 .../parsers/kernel/uname_unsupported_type.go | 11 -
 .../containers/storage/storage.conf | 76 -
 .../containers/storage/storage.conf-freebsd | 76 -
 .../storage/types/storage_test.conf | 4 -
 .../github.com/coreos/go-oidc/v3/oidc/jose.go | 15 +
 .../github.com/coreos/go-oidc/v3/oidc/jwks.go | 13 +-
 .../github.com/coreos/go-oidc/v3/oidc/oidc.go | 2 +-
 .../coreos/go-oidc/v3/oidc/verify.go | 23 +-
 .../github.com/go-jose/go-jose/v4/.gitignore | 2 +
 .../go-jose/go-jose/v4/.golangci.yml | 53 +
 .../github.com/go-jose/go-jose/v4/.travis.yml | 33 +
 .../go-jose/go-jose/v4/CHANGELOG.md | 72 +
 .../go-jose/go-jose/v4/CONTRIBUTING.md | 15 +
 .../go-jose/go-jose/v4}/LICENSE | 5 +-
 .../github.com/go-jose/go-jose/v4/README.md | 114 +
 .../github.com/go-jose/go-jose/v4/SECURITY.md | 13 +
 .../go-jose/go-jose/v4/asymmetric.go | 595 ++++
 .../go-jose/go-jose/v4/cipher/cbc_hmac.go | 196 ++
 .../go-jose/go-jose/v4/cipher/concat_kdf.go | 75 +
 .../go-jose/go-jose/v4/cipher/ecdh_es.go | 86 +
 .../go-jose/go-jose/v4/cipher/key_wrap.go | 109 +
 .../github.com/go-jose/go-jose/v4/crypter.go | 593 ++++
 vendor/github.com/go-jose/go-jose/v4/doc.go | 25 +
 .../github.com/go-jose/go-jose/v4/encoding.go | 228 ++
 .../go-jose/go-jose/v4/json/LICENSE | 27 +
 .../go-jose/go-jose/v4/json/README.md | 13 +
 .../go-jose/go-jose/v4/json/decode.go | 1216 +++++++
 .../go-jose/go-jose/v4/json/encode.go | 1197 +++++++
 .../go-jose/go-jose/v4/json/indent.go | 141 +
 .../go-jose/go-jose/v4/json/scanner.go | 623 ++++
 .../go-jose/go-jose/v4/json/stream.go | 484 +++
 .../go-jose/go-jose/v4/json/tags.go | 44 +
 vendor/github.com/go-jose/go-jose/v4/jwe.go | 381 +++
 vendor/github.com/go-jose/go-jose/v4/jwk.go | 812 +++++
 vendor/github.com/go-jose/go-jose/v4/jws.go | 421 +++
 .../github.com/go-jose/go-jose/v4/opaque.go | 144 +
 .../github.com/go-jose/go-jose/v4/shared.go | 525 +++
 .../github.com/go-jose/go-jose/v4/signing.go | 497 +++
 .../go-jose/go-jose/v4/symmetric.go | 517 +++
 .../go-openapi/analysis/.golangci.yml | 53 +-
 .../github.com/go-openapi/analysis/README.md | 10 +-
 .../go-openapi/analysis/appveyor.yml | 32 -
 vendor/github.com/go-openapi/analysis/doc.go | 10 +-
 .../github.com/go-openapi/analysis/flatten.go | 64 +-
 .../go-openapi/analysis/flatten_name.go | 39 +-
 .../go-openapi/analysis/flatten_options.go | 1 +
 .../analysis/internal/debug/debug.go | 4 +-
 .../internal/flatten/replace/replace.go | 42 +-
 .../analysis/internal/flatten/sortref/keys.go | 2 +-
 .../github.com/go-openapi/analysis/mixin.go | 16 +-
 .../github.com/go-openapi/analysis/schema.go | 12 +-
 .../go-openapi/jsonpointer/.golangci.yml | 61 +
 .../go-openapi/jsonpointer/README.md | 8 +-
 .../go-openapi/jsonpointer/pointer.go | 191 +-
 .../go-openapi/jsonreference/.golangci.yml | 57 +-
 .../go-openapi/jsonreference/README.md | 14 +-
 .../github.com/go-openapi/loads/.golangci.yml | 49 +-
 vendor/github.com/go-openapi/loads/README.md | 2 +-
 vendor/github.com/go-openapi/loads/doc.go | 9 +-
 vendor/github.com/go-openapi/loads/loaders.go | 9 +-
 vendor/github.com/go-openapi/loads/spec.go | 35 +-
 .../go-openapi/runtime/.golangci.yml | 56 +-
 .../github.com/go-openapi/runtime/README.md | 11 +-
 .../go-openapi/runtime/bytestream.go | 149 +-
 .../go-openapi/runtime/client/keepalive.go | 3 +-
 .../runtime/client/opentelemetry.go | 10 +-
 .../go-openapi/runtime/client/request.go | 17 +-
 .../go-openapi/runtime/client/runtime.go | 87 +-
 .../go-openapi/runtime/client_operation.go | 4 +-
 .../go-openapi/runtime/client_request.go | 4 +-
 vendor/github.com/go-openapi/runtime/csv.go | 327 +-
 .../go-openapi/runtime/csv_options.go | 121 +
 .../go-openapi/runtime/logger/standard.go | 2 +
 .../go-openapi/runtime/middleware/context.go | 175 +-
 .../runtime/middleware/denco/router.go | 37 +-
 .../go-openapi/runtime/middleware/doc.go | 87 +-
 .../go-openapi/runtime/middleware/go18.go | 9 -
 .../runtime/middleware/header/header.go | 9 +-
 .../runtime/middleware/parameter.go | 22 +-
 .../go-openapi/runtime/middleware/pre_go18.go | 9 -
 .../go-openapi/runtime/middleware/rapidoc.go | 72 +-
 .../go-openapi/runtime/middleware/redoc.go | 69 +-
 .../go-openapi/runtime/middleware/request.go | 23 +-
 .../go-openapi/runtime/middleware/router.go | 109 +-
 .../go-openapi/runtime/middleware/spec.go | 76 +-
 .../runtime/middleware/swaggerui.go | 89 +-
 .../runtime/middleware/swaggerui_oauth2.go | 31 +-
 .../runtime/middleware/ui_options.go | 173 +
 .../runtime/middleware/untyped/api.go | 15 +-
 .../runtime/middleware/validation.go | 12 +-
 .../github.com/go-openapi/runtime/request.go | 14 +-
 .../runtime/security/authenticator.go | 19 +-
 .../go-openapi/runtime/yamlpc/yaml.go | 3 +-
 vendor/github.com/go-openapi/spec/.gitignore | 3 +-
 .../github.com/go-openapi/spec/.golangci.yml | 21 +-
 vendor/github.com/go-openapi/spec/README.md | 28 +-
 .../github.com/go-openapi/spec/appveyor.yml | 32 -
 vendor/github.com/go-openapi/spec/bindata.go | 297 --
 vendor/github.com/go-openapi/spec/embed.go | 17 +
 vendor/github.com/go-openapi/spec/expander.go | 75 +-
 .../go-openapi/spec/normalizer_nonwindows.go | 2 +-
 .../github.com/go-openapi/spec/operation.go | 5 +-
 .../github.com/go-openapi/spec/parameter.go | 42 +-
 .../go-openapi/spec/schema_loader.go | 9 +-
 .../spec/schemas/jsonschema-draft-04.json | 149 +
 .../go-openapi/spec/schemas/v2/schema.json | 1607 +++++++++
 vendor/github.com/go-openapi/spec/spec.go | 6 +-
 vendor/github.com/go-openapi/spec/swagger.go | 4 +-
 vendor/github.com/go-openapi/spec/url_go18.go | 8 -
 vendor/github.com/go-openapi/spec/url_go19.go | 3 -
 .../go-openapi/validate/.golangci.yml | 53 +-
 .../go-openapi/validate/BENCHMARK.md | 31 +
 .../github.com/go-openapi/validate/README.md | 8 +-
 .../go-openapi/validate/appveyor.yml | 32 -
 .../go-openapi/validate/default_validator.go | 109 +-
 vendor/github.com/go-openapi/validate/doc.go | 70 +-
 .../go-openapi/validate/example_validator.go | 67 +-
 .../github.com/go-openapi/validate/formats.go | 78 +-
 .../github.com/go-openapi/validate/helpers.go | 23 +-
 .../go-openapi/validate/object_validator.go | 448 ++-
 .../github.com/go-openapi/validate/options.go | 21 +-
 .../github.com/go-openapi/validate/pools.go | 366 +++
 .../go-openapi/validate/pools_debug.go | 1012 ++++++
 .../github.com/go-openapi/validate/result.go | 131 +-
 .../github.com/go-openapi/validate/schema.go | 260 +-
 .../go-openapi/validate/schema_option.go | 31 +-
 .../go-openapi/validate/schema_props.go | 412 ++-
 .../go-openapi/validate/slice_validator.go | 57 +-
 vendor/github.com/go-openapi/validate/spec.go | 188 +-
 .../go-openapi/validate/spec_messages.go | 12 +-
 vendor/github.com/go-openapi/validate/type.go | 72 +-
 .../go-openapi/validate/validator.go | 936 ++++--
 .../github.com/go-openapi/validate/values.go | 12 +-
 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 5 +
 .../ginkgo/v2/ginkgo/watch/package_hash.go | 9 +
 .../onsi/ginkgo/v2/types/version.go | 2 +-
 .../fulcio/pkg/certificate/extensions.go | 2 +-
 .../pkg/generated/models/cose_v001_schema.go | 12 +-
 .../models/hashedrekord_v001_schema.go | 12 +-
 .../rekor/pkg/generated/models/log_entry.go | 2 +-
 .../sigstore/rekor/pkg/util/checkpoint.go | 31 +-
 .../github.com/sigstore/rekor/pkg/util/sha.go | 38 +
 .../net/http/otelhttp/common.go | 16 +-
 .../net/http/otelhttp/config.go | 7 +-
 .../net/http/otelhttp/handler.go | 42 +-
 .../otelhttp/internal/semconvutil/httpconv.go | 296 +-
 .../otelhttp/internal/semconvutil/netconv.go | 175 +-
 .../net/http/otelhttp/transport.go | 109 +-
 .../net/http/otelhttp/version.go | 2 +-
 .../instrumentation/net/http/otelhttp/wrap.go | 5 +-
 .../go.opentelemetry.io/otel/.codespellignore | 2 +
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 86 +-
 vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 2 +-
 vendor/go.opentelemetry.io/otel/Makefile | 4 +-
 vendor/go.opentelemetry.io/otel/README.md | 20 +-
 .../go.opentelemetry.io/otel/attribute/set.go | 2 +-
 .../otel/internal/global/meter.go | 4 +-
 .../otel/internal/global/state.go | 6 +-
 .../otel/semconv/internal/http.go | 338 --
 .../otel/semconv/internal/v2/http.go | 404 +++
 .../otel/semconv/internal/v2/net.go | 324 ++
 .../otel/semconv/v1.12.0/http.go | 114 -
 .../otel/semconv/v1.12.0/resource.go | 1042 ------
 .../otel/semconv/v1.12.0/trace.go | 1704 ----------
 .../otel/semconv/v1.17.0/httpconv/http.go | 152 +
 .../otel/semconv/v1.20.0/attribute_group.go | 1209 +++++++
 .../otel/semconv/{v1.12.0 => v1.20.0}/doc.go | 4 +-
 .../otel/semconv/v1.20.0/event.go | 199 ++
 .../semconv/{v1.12.0 => v1.20.0}/exception.go | 2 +-
 .../otel/semconv/v1.20.0/http.go | 21 +
 .../otel/semconv/v1.20.0/resource.go | 2071 ++++++++++++
 .../semconv/{v1.12.0 => v1.20.0}/schema.go | 4 +-
 .../otel/semconv/v1.20.0/trace.go | 2610 +++++++++++++++
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 20 +-
 vendor/golang.org/x/oauth2/oauth2.go | 2 +-
 vendor/google.golang.org/grpc/clientconn.go | 2 +
 .../grpc/internal/transport/transport.go | 9 +-
 .../grpc/resolver/resolver.go | 3 +
 .../grpc/resolver_wrapper.go | 1 +
 vendor/google.golang.org/grpc/rpc_util.go | 54 +-
 vendor/google.golang.org/grpc/server.go | 11 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/gopkg.in/yaml.v2/.travis.yml | 17 -
 vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 -
 vendor/gopkg.in/yaml.v2/NOTICE | 13 -
 vendor/gopkg.in/yaml.v2/README.md | 133 -
 vendor/gopkg.in/yaml.v2/apic.go | 744 -----
 vendor/gopkg.in/yaml.v2/decode.go | 815 -----
 vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ----------
 vendor/gopkg.in/yaml.v2/encode.go | 390 ---
 vendor/gopkg.in/yaml.v2/parserc.go | 1095 -------
 vendor/gopkg.in/yaml.v2/readerc.go | 412 ---
 vendor/gopkg.in/yaml.v2/resolve.go | 258 --
 vendor/gopkg.in/yaml.v2/scannerc.go | 2711 ----------------
 vendor/gopkg.in/yaml.v2/sorter.go | 113 -
 vendor/gopkg.in/yaml.v2/writerc.go | 26 -
 vendor/gopkg.in/yaml.v2/yaml.go | 478 ---
 vendor/gopkg.in/yaml.v2/yamlh.go | 739 -----
 vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 -
 vendor/modules.txt | 105 +-
 318 files changed, 25202 insertions(+), 21958 deletions(-)
 create mode 100644 vendor/github.com/containers/common/pkg/secrets/define/secrets.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/devmapper/driver.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/devmapper/mount.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/register/register_devicemapper.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/log.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/.gitignore
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/.golangci.yml
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/.travis.yml
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
 rename vendor/{gopkg.in/yaml.v2 => github.com/go-jose/go-jose/v4}/LICENSE (99%)
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/README.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/SECURITY.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/asymmetric.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/crypter.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/doc.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/encoding.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/LICENSE
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/README.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/decode.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/encode.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/indent.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/scanner.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/stream.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/tags.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwe.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwk.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/jws.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/opaque.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/shared.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/signing.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/symmetric.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/appveyor.yml
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/runtime/csv_options.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/go18.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/pre_go18.go
 create mode 100644 vendor/github.com/go-openapi/runtime/middleware/ui_options.go
 delete mode 100644 vendor/github.com/go-openapi/spec/appveyor.yml
 delete mode 100644 vendor/github.com/go-openapi/spec/bindata.go
 create mode 100644 vendor/github.com/go-openapi/spec/embed.go
 create mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
 create mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json
 delete mode 100644 vendor/github.com/go-openapi/spec/url_go18.go
 create mode 100644 vendor/github.com/go-openapi/validate/BENCHMARK.md
 delete mode 100644 vendor/github.com/go-openapi/validate/appveyor.yml
 create mode 100644 vendor/github.com/go-openapi/validate/pools.go
 create mode 100644 vendor/github.com/go-openapi/validate/pools_debug.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.20.0}/doc.go (85%)
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.20.0}/exception.go (91%)
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.20.0}/schema.go (86%)
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml
 delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml
 delete mode 100644 vendor/gopkg.in/yaml.v2/NOTICE
 delete mode 100644 vendor/gopkg.in/yaml.v2/README.md
 delete mode 100644 vendor/gopkg.in/yaml.v2/apic.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/decode.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/parserc.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/readerc.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/resolve.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/sorter.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/yaml.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go
 delete mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go

diff --git a/go.mod b/go.mod
index bea5cc4678..37c8f89251 100644
--- a/go.mod
+++ b/go.mod
@@ -13,15 +13,15 @@ require (
 	github.com/checkpoint-restore/checkpointctl v1.1.0
 	github.com/checkpoint-restore/go-criu/v7 v7.1.0
 	github.com/containernetworking/plugins v1.4.1
-	github.com/containers/buildah v1.35.1-0.20240425012436-fc18157da3a1
-	github.com/containers/common v0.58.1-0.20240419143618-deb3eeef3b74
+	github.com/containers/buildah v1.35.1-0.20240507152307-453d2fc1099f
+	github.com/containers/common v0.58.1-0.20240508094622-694258d1f523
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363
-	github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f
+	github.com/containers/image/v5 v5.30.1-0.20240506211741-f0acff0fe152
 	github.com/containers/libhvee v0.7.1
 	github.com/containers/ocicrypt v1.1.10
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5
+	github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/coreos/stream-metadata-go v0.4.4
@@ -52,7 +52,7 @@ require (
 	github.com/moby/sys/user v0.1.0
 	github.com/moby/term v0.5.0
 	github.com/nxadm/tail v1.4.11
-	github.com/onsi/ginkgo/v2 v2.17.2
+	github.com/onsi/ginkgo/v2 v2.17.3
 	github.com/onsi/gomega v1.33.1
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
@@ -88,7 +88,7 @@ require (
 require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-	github.com/Microsoft/hcsshim v0.12.0 // indirect
+	github.com/Microsoft/hcsshim v0.12.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
@@ -98,15 +98,15 @@ require (
 	github.com/chenzhuoyu/iasm v0.9.1 // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
-	github.com/containerd/containerd v1.7.13 // indirect
+	github.com/containerd/containerd v1.7.16 // indirect
 	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84 // indirect
-	github.com/coreos/go-oidc/v3 v3.9.0 // indirect
+	github.com/containers/luksy v0.0.0-20240408185936-afd8e7619947 // indirect
+	github.com/coreos/go-oidc/v3 v3.10.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -121,19 +121,20 @@ require (
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/gin-gonic/gin v1.9.1 // indirect
 	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.1 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
-	github.com/go-openapi/analysis v0.21.4 // indirect
+	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/loads v0.21.2 // indirect
-	github.com/go-openapi/runtime v0.26.0 // indirect
-	github.com/go-openapi/spec v0.20.9 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/loads v0.22.0 // indirect
+	github.com/go-openapi/runtime v0.28.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
 	github.com/go-openapi/strfmt v0.23.0 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
-	github.com/go-openapi/validate v0.22.1 // indirect
+	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-playground/validator/v10 v10.17.0 // indirect
@@ -143,7 +144,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/go-containerregistry v0.19.0 // indirect
+	github.com/google/go-containerregistry v0.19.1 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -189,12 +190,12 @@ require (
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
-	github.com/sigstore/fulcio v1.4.3 // indirect
-	github.com/sigstore/rekor v1.2.2 // indirect
+	github.com/sigstore/fulcio v1.4.5 // indirect
+	github.com/sigstore/rekor v1.3.6 // indirect
 	github.com/sigstore/sigstore v1.8.3 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.15.2 // indirect
+	github.com/sylabs/sif/v2 v2.16.0 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -209,22 +210,20 @@ require (
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
-	go.opentelemetry.io/otel v1.22.0 // indirect
-	go.opentelemetry.io/otel/metric v1.22.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.21.0 // indirect
-	go.opentelemetry.io/otel/trace v1.22.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+	go.opentelemetry.io/otel v1.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.24.0 // indirect
+	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	golang.org/x/arch v0.7.0 // indirect
 	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/oauth2 v0.19.0 // indirect
+	golang.org/x/oauth2 v0.20.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.21.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
-	google.golang.org/grpc v1.62.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+	google.golang.org/grpc v1.62.1 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
 	tags.cncf.io/container-device-interface/specs-go v0.7.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 778a575d15..a3ffbc1169 100644
--- a/go.sum
+++ b/go.sum
@@ -12,19 +12,16 @@ github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.0 h1:rbICA+XZFwrBef2Odk++0LjFvClNCJGRK+fsrP254Ts=
-github.com/Microsoft/hcsshim v0.12.0/go.mod h1:RZV12pcHCXQ42XnlQ3pz6FZfmrC1C+R4gaOHhRNML1g=
+github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
+github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ=
 github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
 github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
 github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -66,8 +63,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
 github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
-github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
-github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
+github.com/containerd/containerd v1.7.16 h1:7Zsfe8Fkj4Wi2My6DXGQ87hiqIrmOXolm72ZEkFU5Mg=
+github.com/containerd/containerd v1.7.16/go.mod h1:NL49g7A/Fui7ccmxV6zkBWwqMgmMxFWzujYCc+JLt7k=
 github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
 github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -80,32 +77,32 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v1.4.1 h1:+sJRRv8PKhLkXIl6tH1D7RMi+CbbHutDGU+ErLBORWA=
 github.com/containernetworking/plugins v1.4.1/go.mod h1:n6FFGKcaY4o2o5msgu/UImtoC+fpQXM3076VHfHbj60=
-github.com/containers/buildah v1.35.1-0.20240425012436-fc18157da3a1 h1:Qza4/6SPoIO/vR2wvSiYo3kst52dWUST0ZJG3L4ppYs=
-github.com/containers/buildah v1.35.1-0.20240425012436-fc18157da3a1/go.mod h1:unO5wyQXGHXcDBFu0D+W3bUXvfQrMEh1J6a8dgX8F+4=
-github.com/containers/common v0.58.1-0.20240419143618-deb3eeef3b74 h1:3o+wybYKyr03hlrNdZGDjV8ukVTU2JXttzAVk9OmRLg=
-github.com/containers/common v0.58.1-0.20240419143618-deb3eeef3b74/go.mod h1:AnMTrXjygOD8jQKNBae4EEjKLlED9Svysh98Be+MktM=
+github.com/containers/buildah v1.35.1-0.20240507152307-453d2fc1099f h1:nkYjiklm9gnopLfp1p+rOucLaLMKs+APUHowVwIk25k=
+github.com/containers/buildah v1.35.1-0.20240507152307-453d2fc1099f/go.mod h1:CbpbxJEuDs1ymlLD9057ukMtDOF1HwEuvJaRZAX9Oqs=
+github.com/containers/common v0.58.1-0.20240508094622-694258d1f523 h1:cOAggWgjjBmQgkK1kw4La+ov/TooskH8fgI0YUpgksA=
+github.com/containers/common v0.58.1-0.20240508094622-694258d1f523/go.mod h1:384OZU759/9Vba1fqnjlngmuBmzCK1UjZY6t1GXsz9Q=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363 h1:EqWMZeFa08y2c1GniaFkfjlO5AjegoG2foWo6NlDfUY=
 github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363/go.mod h1:KN4qqZfwVBzvqlN1Ytbhf84sOzftw+R8YL9bixQlr2Y=
-github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f h1:BpQt4TbrW3L9RNjACQgm7FEi+42u34aMMPopvE/J5Fc=
-github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f/go.mod h1:2c2hFZQhnz20XxbfHhyoKvxYbnTHCycHnMO3fZSmQY4=
+github.com/containers/image/v5 v5.30.1-0.20240506211741-f0acff0fe152 h1:ngQBNZXzTAjGeaba0KhsRjC8uWWWf1pLMhzIAyW2Wts=
+github.com/containers/image/v5 v5.30.1-0.20240506211741-f0acff0fe152/go.mod h1:luYQKDGJH9oNAjZj8i6Crnujm24gk2YQbYMAIkAhwNE=
 github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
 github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84 h1:ZyHhVKzwWxHEphK9BFMe8mQU+++IutdHJxJZc5q0wHg=
-github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84/go.mod h1:+idJYfuH2iLhmk8wZEavPACGMRTnZ/NK/5D74U67Um4=
+github.com/containers/luksy v0.0.0-20240408185936-afd8e7619947 h1:LDm12XWmz7PQ9K6iy70m+tGxNlr39KcxFVc8CSnMT+I=
+github.com/containers/luksy v0.0.0-20240408185936-afd8e7619947/go.mod h1:DeMi9C2WxgZtJLpBGd175oGZwX/pOmZ6xJVhA5XAG/g=
 github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic=
 github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5 h1:owLaLUu/RKf0x62tFm5ZQjU21oRUUIWTRMpZ0zkIt3E=
-github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5/go.mod h1:P4tgJNR/o42wmg+9WZtoJtOJvmZKu2dwzFQggcH9aQw=
+github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d h1:AYhkfrN62V4+14wj2kC+HSHXWr0gKsUdzE1sWcSN6g4=
+github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
-github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
-github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
+github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
+github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
@@ -117,7 +114,6 @@ github.com/crc-org/crc/v2 v2.35.0 h1:MiIVgOp5C1XE+4yi1wi2SPkQIo5enu65w3kZureUaeA
 github.com/crc-org/crc/v2 v2.35.0/go.mod h1:iJAZ/M+PvsKtcDrCN2cEDocCvR9bPrVylrucBzmaj90=
 github.com/crc-org/vfkit v0.5.1 h1:r1zNf1g1bLbgu5BgIQodirvYaIGWJQ91eS/PIgNO6lo=
 github.com/crc-org/vfkit v0.5.1/go.mod h1:Hqi20zQcqXMk6JqvByvOidHYv+KzPx3G+cjkdGSWv60=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
@@ -136,8 +132,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v25.0.5+incompatible h1:3Llw3kcE1gOScEojA247iDD+p1l9hHeC7H3vf3Zd5fk=
-github.com/docker/cli v25.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v26.1.1+incompatible h1:bE1/uE2tCa08fMv+7ikLR/RDPoCqytwrLtkIkSzxLvw=
+github.com/docker/cli v26.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v26.1.1+incompatible h1:oI+4kkAgIwwb54b9OC7Xc3hSgu1RlJA/Lln/DF72djQ=
@@ -158,8 +154,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -177,6 +173,8 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
 github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
 github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
 github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
+github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -185,44 +183,26 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
 github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
-github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
-github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
-github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
 github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
 github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
-github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
-github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
-github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
-github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
-github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
-github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
 github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
 github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
-github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
@@ -233,36 +213,11 @@ github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ
 github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
 github.com/go-rod/rod v0.114.7 h1:h4pimzSOUnw7Eo41zdJA788XsawzHjJMyzCE3BrBww0=
 github.com/go-rod/rod v0.114.7/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
 github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
 github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -289,21 +244,19 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic=
-github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
+github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -316,7 +269,6 @@ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQN
 github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -327,16 +279,16 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/schema v1.3.0 h1:rbciOzXAx3IB8stEFnfTwO3sYa6EWlQk79XdyustPDA=
 github.com/gorilla/schema v1.3.0/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
-github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
@@ -349,7 +301,6 @@ github.com/hugelgupf/socketpair v0.0.0-20230822150718-707395b1939a/go.mod h1:71B
 github.com/hugelgupf/vmtest v0.0.0-20230810222836-f8c8e381617c h1:4A+BVHylCBQPxlW1NrUITDpRAHCeX6QSZHmzzFQqliU=
 github.com/hugelgupf/vmtest v0.0.0-20230810222836-f8c8e381617c/go.mod h1:d2FMzS0rIF+3Daufcw660EZfTJihdNPeEwBBJgO4Ap0=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c h1:P/3mFnHCv1A/ej4m8pF5EB6FUt9qEL2Q9lfrcUNwCYs=
@@ -358,7 +309,6 @@ github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
 github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
 github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
 github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
@@ -366,11 +316,8 @@ github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtL
 github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
 github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
@@ -379,16 +326,10 @@ github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZY
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= @@ -399,15 +340,10 @@ github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 h1:DZMFueDbfz6PN github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2/go.mod h1:SWzULI85WerrFt3u+nIm5F9l7EvxZTKQvd0InF3nmgM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -419,8 +355,6 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= @@ -431,8 +365,6 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= @@ -454,10 +386,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= @@ -468,8 +398,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= -github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= +github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -493,14 +423,11 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= @@ -512,21 +439,18 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= +github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rootless-containers/rootlesskit/v2 v2.1.0 h1:dKqduSlzo5TlTv7tTIoTct2cRUNQf+soqcs+6b1ynvE= @@ -548,29 +472,23 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= -github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= -github.com/sigstore/rekor v1.2.2 
h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= -github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= +github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= +github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= +github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= @@ -586,13 +504,12 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.15.2 h1:UzeG36B+lynOnXBTgGFYxZ/ev0JWB73g8XXX6/1vvx8= -github.com/sylabs/sif/v2 v2.15.2/go.mod h1:65ua0JOZQ+8Rb9UReICXngJg9UjQBA0+SSxMa67qCPE= +github.com/sylabs/sif/v2 v2.16.0 h1:2eqaBaQQsn5DZTzm3QZm0HupZQEjNXfxRnCmtyCihEU= +github.com/sylabs/sif/v2 v2.16.0/go.mod h1:d5TxgD/mhMUU3kWLmZmWJQ99Wg0asaTP0bq3ezR1xpg= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= 
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -620,18 +537,12 @@ github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhg github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= @@ -650,44 +561,37 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= 
golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= @@ -718,9 +622,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -728,12 +630,10 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -743,15 +643,10 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -766,7 +661,6 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -798,7 +692,6 @@ golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -813,11 +706,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -836,18 +725,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= +google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= +google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -863,11 +752,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= @@ -879,14 +765,9 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index 53d0beb87f..9f697beca8 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -75,7 +72,7 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + r0, _, _ := syscall.SyscallN(procHcsAttachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -104,7 +101,7 @@ func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + r0, _, _ := syscall.SyscallN(procHcsAttachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -128,7 +125,7 @@ func _hcsDestroyLayer(layerPath *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsDestroyLayer.Addr(), uintptr(unsafe.Pointer(layerPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -152,7 +149,7 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsDetachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -181,7 +178,7 @@ func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + r0, _, _ := syscall.SyscallN(procHcsDetachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -220,7 +217,7 @@ func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uin if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsExportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -235,7 +232,7 @@ func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsFormatWritableLayerVhd.Addr(), uintptr(handle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -250,7 +247,7 @@ func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr e if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + r0, _, _ := syscall.SyscallN(procHcsGetLayerVhdMountPath.Addr(), uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath))) if int32(r0) < 
0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -284,7 +281,7 @@ func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uin if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + r0, _, _ := syscall.SyscallN(procHcsImportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -318,7 +315,7 @@ func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, o if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.SyscallN(procHcsInitializeWritableLayer.Addr(), uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -347,7 +344,7 @@ func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uin if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -381,7 +378,7 @@ func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint1 if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSVolume.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go index 5dcb97eb39..76eb2be7cf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -14,14 +14,14 @@ import ( "golang.org/x/sys/windows" ) -// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// makeOpenFiles calls winio.NewOpenFile for each handle in a slice but closes all the handles // if there is an error. 
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { fs := make([]io.ReadWriteCloser, len(hs)) for i, h := range hs { if h != syscall.Handle(0) { if err == nil { - fs[i], err = winio.MakeOpenFile(h) + fs[i], err = winio.NewOpenFile(windows.Handle(h)) } if err != nil { syscall.Close(h) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go index a35ee945db..11c7e97e34 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -69,7 +66,7 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + r0, _, _ := syscall.SyscallN(procHNSCall.Addr(), uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go index a17a112508..14c750bd8d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -46,6 +43,6 @@ var ( ) func coTaskMemFree(buffer unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(buffer)) return } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go index 26c986b88f..395f54687f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -48,7 +45,7 @@ var ( ) func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -56,7 +53,7 @@ func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidO } func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -64,7 +61,7 @@ func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl * } func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl)) if r0 != 0 { win32err = syscall.Errno(r0) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go index 42368872b7..67779de50b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -75,7 +72,7 @@ func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsCloseComputeSystem.Addr(), uintptr(computeSystem)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -90,7 +87,7 @@ func hcsCloseProcess(process HcsProcess) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsCloseProcess.Addr(), uintptr(process)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -119,7 +116,7 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsCreateComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -143,7 +140,7 @@ func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, proce if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsCreateProcess.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -167,7 +164,7 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsEnumerateComputeSystems.Addr(), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -191,7 +188,7 @@ func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsGetComputeSystemProperties.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -206,7 +203,7 @@ func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInforma if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsGetProcessInfo.Addr(), uintptr(process), 
uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -221,7 +218,7 @@ func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, res if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsGetProcessProperties.Addr(), uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -245,7 +242,7 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsGetServiceProperties.Addr(), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -269,7 +266,7 @@ func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, res if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsModifyComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -293,7 +290,7 @@ func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (h if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsModifyProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -317,7 +314,7 @@ func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsModifyServiceSettings.Addr(), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -341,7 +338,7 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16 if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsOpenComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -356,7 +353,7 @@ func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, re if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsOpenProcess.Addr(), 
uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -380,7 +377,7 @@ func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **u if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsPauseComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -395,7 +392,7 @@ func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsRegisterComputeSystemCallback.Addr(), uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -410,7 +407,7 @@ func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context ui if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsRegisterProcessCallback.Addr(), uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -434,7 +431,7 @@ func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result ** if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsResumeComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -458,7 +455,7 @@ func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **ui if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsSaveComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -482,7 +479,7 @@ func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsShutdownComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -506,7 +503,7 @@ func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, 
_, _ := syscall.SyscallN(procHcsSignalProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -530,7 +527,7 @@ func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **u if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsStartComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -554,7 +551,7 @@ func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsTerminateComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -569,7 +566,7 @@ func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsTerminateProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -584,7 +581,7 @@ func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsUnregisterComputeSystemCallback.Addr(), uintptr(callbackHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -599,7 +596,7 @@ func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsUnregisterProcessCallback.Addr(), uintptr(callbackHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go index 0cb509c46f..403b94fc57 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -77,7 +74,7 @@ func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, } func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -85,7 +82,7 @@ func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int6 } func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + r1, _, e1 := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) if r1 != 0 { err = errnoErr(e1) } @@ -102,7 +99,7 @@ func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtua } func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) if r1 != 0 { err = errnoErr(e1) } @@ -123,7 +120,7 @@ func _activateLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procActivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -156,7 +153,7 @@ func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC if len(descriptors) > 0 { _p2 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procCopyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -185,7 +182,7 @@ func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), 
uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + r0, _, _ := syscall.SyscallN(procCreateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -213,7 +210,7 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto if len(descriptors) > 0 { _p1 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procCreateSandboxLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -237,7 +234,7 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procDeactivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -261,7 +258,7 @@ func _destroyLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procDestroyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -285,7 +282,7 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procExpandSandboxSize.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -318,7 +315,7 @@ func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L if len(descriptors) > 0 { _p2 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procExportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -333,7 +330,7 @@ func getBaseImages(buffer **uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetBaseImages.Addr(), uintptr(unsafe.Pointer(buffer))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -357,7 +354,7 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u if hr != nil { return } - r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLayerMountPath.Addr(), 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -386,7 +383,7 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + r0, _, _ := syscall.SyscallN(procGrantVmAccess.Addr(), uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -419,7 +416,7 @@ func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L if len(descriptors) > 0 { _p2 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procImportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -443,7 +440,7 @@ func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + r0, _, _ := syscall.SyscallN(procLayerExists.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -467,7 +464,7 @@ func _nameToGuid(name *uint16, guid *_guid) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + r0, _, _ := syscall.SyscallN(procNameToGuid.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -495,7 +492,7 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT if len(descriptors) > 0 { _p1 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + r0, _, _ := syscall.SyscallN(procPrepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -519,7 +516,7 @@ func _processBaseImage(path *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procProcessBaseImage.Addr(), uintptr(unsafe.Pointer(path))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -543,7 +540,7 @@ func _processUtilityImage(path *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procProcessUtilityImage.Addr(), uintptr(unsafe.Pointer(path))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -567,7 +564,7 @@ func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := 
syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procUnprepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index ffd3cd7ff3..33720fe8b5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -109,7 +106,7 @@ var ( ) func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { - r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procLogonUserW.Addr(), uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -121,7 +118,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount)) + r0, _, _ := syscall.SyscallN(procBfSetupFilter.Addr(), uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -132,7 +129,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, } func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_PropertyW.Addr(), uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -143,7 +140,7 @@ func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyTyp } func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + r0, _, _ := 
syscall.SyscallN(procCM_Get_Device_ID_ListA.Addr(), uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -154,7 +151,7 @@ func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags u } func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_List_SizeA.Addr(), uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -174,7 +171,7 @@ func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr er } func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + r0, _, _ := syscall.SyscallN(procCM_Locate_DevNodeW.Addr(), uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -189,7 +186,7 @@ func CimCloseImage(cimFSHandle FsHandle) (err error) { if err != nil { return } - syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + syscall.SyscallN(procCimCloseImage.Addr(), uintptr(cimFSHandle)) return } @@ -198,7 +195,7 @@ func CimCloseStream(cimStreamHandle StreamHandle) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCloseStream.Addr(), uintptr(cimStreamHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -213,7 +210,7 @@ func CimCommitImage(cimFSHandle FsHandle) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCommitImage.Addr(), uintptr(cimFSHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -237,7 +234,7 @@ func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCreateAlternateStream.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -261,7 +258,7 @@ func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCreateFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -290,7 +287,7 @@ func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) if hr != nil { return } - r0, _, _ := 
syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + r0, _, _ := syscall.SyscallN(procCimCreateHardLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -314,7 +311,7 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCreateImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -338,7 +335,7 @@ func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0) + r0, _, _ := syscall.SyscallN(procCimDeletePath.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -353,7 +350,7 @@ func CimDismountImage(volumeID *g) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimDismountImage.Addr(), uintptr(unsafe.Pointer(volumeID))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -382,7 +379,7 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimMountImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -397,7 +394,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize)) + r0, _, _ := syscall.SyscallN(procCimWriteStream.Addr(), uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -408,7 +405,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin } func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { - r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + r0, _, _ := syscall.SyscallN(procSetJobCompartmentId.Addr(), uintptr(handle), uintptr(compartmentId)) if r0 != 0 { win32Err = syscall.Errno(r0) } @@ -416,12 +413,12 @@ func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err } func ClosePseudoConsole(hpc windows.Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(hpc)) return } func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) { - r1, 
_, e1 := syscall.Syscall(procCopyFileW.Addr(), 3, uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists)) + r1, _, e1 := syscall.SyscallN(procCopyFileW.Addr(), uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists)) if r1 == 0 { err = errnoErr(e1) } @@ -429,7 +426,7 @@ func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32 } func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -440,7 +437,7 @@ func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Han } func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateRemoteThread.Addr(), uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID))) handle = windows.Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -449,13 +446,13 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, } func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) amount = uint32(r0) return } func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { - r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + r1, _, e1 := syscall.SyscallN(procIsProcessInJob.Addr(), uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) if r1 == 0 { err = errnoErr(e1) } @@ -463,18 +460,18 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result } func LocalAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(size)) ptr = uintptr(r0) return } func LocalFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procLocalFree.Addr(), uintptr(ptr)) return } func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) + r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) handle = 
windows.Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -483,7 +480,7 @@ func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (h } func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -491,7 +488,7 @@ func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobOb } func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + r0, _, e1 := syscall.SyscallN(procQueryIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount))) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -500,7 +497,7 @@ func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName } func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(hPc), uintptr(size)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -511,7 +508,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { } func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + r0, _, e1 := syscall.SyscallN(procSearchPathW.Addr(), uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -520,7 +517,7 @@ func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBuffer } func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + r0, _, e1 := syscall.SyscallN(procSetIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo))) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -529,7 +526,7 @@ func SetIoRateControlInformationJobObject(jobHandle windows.Handle, 
ioRateContro } func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) { - r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0) + r0, _, _ := syscall.SyscallN(procNetLocalGroupAddMembers.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries)) if r0 != 0 { status = syscall.Errno(r0) } @@ -537,7 +534,7 @@ func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32 } func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) { - r0, _, _ := syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetLocalGroupGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -545,7 +542,7 @@ func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, b } func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) { - r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserAdd.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err))) if r0 != 0 { status = syscall.Errno(r0) } @@ -553,7 +550,7 @@ func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) ( } func netUserDel(serverName *uint16, username *uint16) (status error) { - r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0) + r0, _, _ := syscall.SyscallN(procNetUserDel.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username))) if r0 != 0 { status = syscall.Errno(r0) } @@ -561,25 +558,25 @@ func netUserDel(serverName *uint16, username *uint16) (status error) { } func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength)) status = uint32(r0) return } func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status 
uint32) { - r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + r0, _, _ := syscall.SyscallN(procNtCreateJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) status = uint32(r0) return } func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + r0, _, _ := syscall.SyscallN(procNtOpenDirectoryObject.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) status = uint32(r0) return } func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + r0, _, _ := syscall.SyscallN(procNtOpenJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) status = uint32(r0) return } @@ -593,31 +590,31 @@ func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleE if restartScan { _p1 = 1 } - r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQueryDirectoryObject.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength))) status = uint32(r0) return } func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength))) status = uint32(r0) return } func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength))) status = uint32(r0) return } func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), 
uintptr(class)) status = uint32(r0) return } func RtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosError.Addr(), uintptr(status)) if r0 != 0 { winerr = syscall.Errno(r0) } @@ -625,7 +622,7 @@ func RtlNtStatusToDosError(status uint32) (winerr error) { } func ORCloseHive(handle ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0) + r0, _, _ := syscall.SyscallN(procORCloseHive.Addr(), uintptr(handle)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -633,7 +630,7 @@ func ORCloseHive(handle ORHKey) (win32err error) { } func ORCloseKey(handle ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0) + r0, _, _ := syscall.SyscallN(procORCloseKey.Addr(), uintptr(handle)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -641,7 +638,7 @@ func ORCloseKey(handle ORHKey) (win32err error) { } func ORCreateHive(key *ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0) + r0, _, _ := syscall.SyscallN(procORCreateHive.Addr(), uintptr(unsafe.Pointer(key))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -658,7 +655,7 @@ func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, se } func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) { - r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0) + r0, _, _ := syscall.SyscallN(procORCreateKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -675,7 +672,7 @@ func ORDeleteKey(handle ORHKey, subKey string) (win32err error) { } func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) { - r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0) + r0, _, _ := syscall.SyscallN(procORDeleteKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -697,7 +694,7 @@ func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, d } func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen))) + r0, _, _ := syscall.SyscallN(procORGetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -709,7 +706,7 @@ func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) { if len(hiveHandles) > 0 { _p0 = &hiveHandles[0] } - r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), 
uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procORMergeHives.Addr(), uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -726,7 +723,7 @@ func OROpenHive(hivePath string, result *ORHKey) (win32err error) { } func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procOROpenHive.Addr(), uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -743,7 +740,7 @@ func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) { } func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procOROpenKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -760,7 +757,7 @@ func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVe } func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0) + r0, _, _ := syscall.SyscallN(procORSaveHive.Addr(), uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -777,7 +774,7 @@ func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, d } func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0) + r0, _, _ := syscall.SyscallN(procORSetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen)) if r0 != 0 { win32err = syscall.Errno(r0) } diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go index 9b619b6e62..e43d59a406 100644 --- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
 	return e
 }

@@ -46,7 +43,7 @@ var (
 )

 func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
-	r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSetCurrentThreadCompartmentId.Addr(), uintptr(compartmentId))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 6dd95ed381..8eb0d98323 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -32,7 +32,7 @@ env:
     DEBIAN_NAME: "debian-13"

     # Image identifiers
-    IMAGE_SUFFIX: "c20240320t153921z-f39f38d13"
+    IMAGE_SUFFIX: "c20240411t124913z-f39f38d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
diff --git a/vendor/github.com/containers/buildah/.packit.yaml b/vendor/github.com/containers/buildah/.packit.yaml
index c63311c8cb..ae0e7cc301 100644
--- a/vendor/github.com/containers/buildah/.packit.yaml
+++ b/vendor/github.com/containers/buildah/.packit.yaml
@@ -20,8 +20,6 @@ jobs:
       - fedora-all-aarch64
       - fedora-eln-x86_64
       - fedora-eln-aarch64
-      - centos-stream+epel-next-8-x86_64
-      - centos-stream+epel-next-8-aarch64
       - epel-9-x86_64
       - epel-9-aarch64
     additional_repos:
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 3a955c85a7..61ec458ea5 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -22,14 +22,8 @@ STRIP ?= strip
 GO := go
 GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
 GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
-# test for go module support
-ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
-export GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
-export GO_TEST=GO111MODULE=on $(GO) test -mod=vendor
-else
 export GO_BUILD=$(GO) build
 export GO_TEST=$(GO) test
-endif

 RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)

 COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
@@ -47,7 +41,7 @@ LIBSECCOMP_COMMIT := release-2.3
 EXTRA_LDFLAGS ?=
 BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
-SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
+SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/jail/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go pkg/volumes/*.go util/*.go
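Every hcsshim zsyscall hunk above is the same mechanical rewrite: the fixed-arity syscall.Syscall/Syscall6/Syscall9/Syscall12 helpers, which need an explicit argument count plus zero padding up to the next supported arity, become the variadic syscall.SyscallN added in Go 1.18. A minimal sketch of the before/after shape, using a hypothetical zero-argument wrapper around kernel32's GetTickCount64 rather than any proc from the patch:

    //go:build windows

    package main

    import (
    	"syscall"

    	"golang.org/x/sys/windows"
    )

    // procGetTickCount64 is resolved the same way the generated zsyscall
    // files resolve their procs; it is an illustration, not part of the patch.
    var (
    	modkernel32        = windows.NewLazySystemDLL("kernel32.dll")
    	procGetTickCount64 = modkernel32.NewProc("GetTickCount64")
    )

    func tickCount() uint64 {
    	// Old generated form: pick the fixed-arity helper, pass the argument
    	// count, and pad unused slots with zeros:
    	//   r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
    	// New generated form: SyscallN is variadic, so the count and the
    	// padding disappear, which is all these hunks change.
    	r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
    	return uint64(r0)
    }

    func main() {
    	println(tickCount())
    }

SyscallN keeps the same (r1, r2, errno) return convention, so the surrounding HRESULT and errno handling in these files is untouched.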
 LINTFLAGS ?=
@@ -209,9 +203,10 @@ vendor-in-container:

 .PHONY: vendor
 vendor:
-	GO111MODULE=on $(GO) mod tidy
-	GO111MODULE=on $(GO) mod vendor
-	GO111MODULE=on $(GO) mod verify
+	$(GO) mod tidy
+	$(GO) mod vendor
+	$(GO) mod verify
+	if test -n "$(strip $(shell go env GOTOOLCHAIN))"; then go mod edit -toolchain none ; fi

 .PHONY: lint
 lint: install.tools
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 0e6a1553c6..bd07413806 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -20,6 +20,8 @@ The Buildah package provides a command line tool that can be used to

 For blogs, release announcements and more, please checkout the [buildah.io](https://buildah.io) website!

+**[Buildah Container Images](https://github.com/containers/image_build/blob/main/buildah/README.md)**
+
 **[Buildah Demos](demos)**

 **[Changelog](CHANGELOG.md)**
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go
index 0ed8fa41f7..8a2c0c0dec 100644
--- a/vendor/github.com/containers/buildah/digester.go
+++ b/vendor/github.com/containers/buildah/digester.go
@@ -141,9 +141,12 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
 			closed = filterer.closed
 			filterer.closedLock.Unlock()
 		}
-		pipeReader.Close()
-		tarWriter.Close()
-		writeCloser.Close()
+		err1 := tarWriter.Close()
+		err := writeCloser.Close()
+		if err == nil {
+			err = err1
+		}
+		pipeReader.CloseWithError(err)
 		filterer.wg.Done()
 	}()
 	return filterer
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index a26153b9b6..aaad13807a 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -585,50 +585,54 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		return nil, fmt.Errorf("compressing %s: %w", what, err)
 	}
 	writer := io.MultiWriter(writeCloser, srcHasher.Hash())
-	// Scrub any local user names that might correspond to UIDs or GIDs of
-	// files in this layer.
 	{
+		// Tweak the contents of layers we're creating.
 		nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
 		writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
+			// Scrub any local user names that might correspond to UIDs or GIDs of
+			// files in this layer.
 			hdr.Uname, hdr.Gname = "", ""
-			return false, false, nil
-		})
-		writer = io.Writer(writeCloser)
-	}
-	// Use specified timestamps in the layer, if we're doing that for
-	// history entries.
-	if i.created != nil {
-		nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
-		writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
-			// Changing a zeroed field to a non-zero field
-			// can affect the format that the library uses
-			// for writing the header, so only change
-			// fields that are already set to avoid
-			// changing the format (and as a result,
-			// changing the length) of the header that we
-			// write.
-			if !hdr.ModTime.IsZero() {
-				hdr.ModTime = *i.created
-			}
-			if !hdr.AccessTime.IsZero() {
-				hdr.AccessTime = *i.created
-			}
-			if !hdr.ChangeTime.IsZero() {
-				hdr.ChangeTime = *i.created
+			// Use specified timestamps in the layer, if we're doing that for history
+			// entries.
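+			// (Context for the checks below: archive/tar can only represent
+			// AccessTime and ChangeTime in PAX or GNU format headers, so
+			// setting a field that was zero can force a longer header
+			// format and change the blob's length and digest.)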
+			if i.created != nil {
+				// Changing a zeroed field to a non-zero field can affect the
+				// format that the library uses for writing the header, so only
+				// change fields that are already set to avoid changing the
+				// format (and as a result, changing the length) of the header
+				// that we write.
+				if !hdr.ModTime.IsZero() {
+					hdr.ModTime = *i.created
+				}
+				if !hdr.AccessTime.IsZero() {
+					hdr.AccessTime = *i.created
+				}
+				if !hdr.ChangeTime.IsZero() {
+					hdr.ChangeTime = *i.created
+				}
+				return false, false, nil
 			}
 			return false, false, nil
 		})
 		writer = io.Writer(writeCloser)
 	}
+	// Okay, copy from the raw diff through the filter, compressor, and counter and
+	// digesters.
 	size, err := io.Copy(writer, rc)
-	writeCloser.Close()
-	layerFile.Close()
+	if err := writeCloser.Close(); err != nil {
+		layerFile.Close()
+		rc.Close()
+		return nil, fmt.Errorf("storing %s to file: %w on pipe close", what, err)
+	}
+	if err := layerFile.Close(); err != nil {
+		rc.Close()
+		return nil, fmt.Errorf("storing %s to file: %w on file close", what, err)
+	}
 	rc.Close()
 	if errChan != nil {
 		err = <-errChan
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("extracting container rootfs: %w", err)
 		}
 	}
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 300f6590bf..14c74af00c 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -47,6 +47,7 @@ import (
 	"golang.org/x/exp/slices"
 	"golang.org/x/sys/unix"
 	"tags.cncf.io/container-device-interface/pkg/cdi"
+	"tags.cncf.io/container-device-interface/pkg/parser"
 )

 var (
@@ -69,40 +70,55 @@ func setChildProcess() error {
 }

 func (b *Builder) cdiSetupDevicesInSpec(deviceSpecs []string, configDir string, spec *specs.Spec) ([]string, error) {
-	leftoverDevices := deviceSpecs
-	registry, err := cdi.NewCache()
+	var configDirs []string
+	defConfig, err := config.Default()
 	if err != nil {
-		return nil, fmt.Errorf("creating CDI registry: %w", err)
+		return nil, fmt.Errorf("failed to get container config: %w", err)
 	}
-	var configDirs []string
+	// The CDI cache prioritizes entries from directories that are later in
+	// the list of ones it scans, so start with our general config, then
+	// append values passed to us through API layers.
+	configDirs = slices.Clone(defConfig.Engine.CdiSpecDirs.Get())
 	if b.CDIConfigDir != "" {
 		configDirs = append(configDirs, b.CDIConfigDir)
 	}
 	if configDir != "" {
 		configDirs = append(configDirs, configDir)
 	}
-	// TODO: CdiSpecDirs will be in containers/common v0.59.0 or later?
-	// defConfig, err := config.Default()
-	// if err != nil {
-	//	return nil, fmt.Errorf("failed to get container config: %w", err)
-	// }
-	// configDirs = append(configDirs, defConfig.Engine.CdiSpecDirs.Get()...)
-	if len(configDirs) > 0 {
-		if err := registry.Configure(cdi.WithSpecDirs(configDirs...)); err != nil {
-			return nil, fmt.Errorf("CDI registry ignored configured directories %v: %w", configDirs, err)
+	if len(configDirs) == 0 {
+		// No directories to scan for CDI configuration means that CDI
+		// won't have any details for setting up any devices, so we
+		// don't need to be doing anything here.
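+		// (CDI handling here is purely additive: with no spec directories
+		// configured, every requested device, qualified or not, goes back
+		// to the caller to be handled as a plain device node.)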
+		return deviceSpecs, nil
+	}
+	var qualifiedDeviceSpecs, unqualifiedDeviceSpecs []string
+	for _, deviceSpec := range deviceSpecs {
+		if parser.IsQualifiedName(deviceSpec) {
+			qualifiedDeviceSpecs = append(qualifiedDeviceSpecs, deviceSpec)
+		} else {
+			unqualifiedDeviceSpecs = append(unqualifiedDeviceSpecs, deviceSpec)
 		}
 	}
-	if err := registry.Refresh(); err != nil {
-		logrus.Warnf("CDI registry refresh: %v", err)
+	if len(qualifiedDeviceSpecs) == 0 {
+		// None of the specified devices were in the form that would be
+		// handled by CDI, so we don't need to do anything here.
+		return deviceSpecs, nil
+	}
+	if err := cdi.Configure(cdi.WithSpecDirs(configDirs...)); err != nil {
+		return nil, fmt.Errorf("CDI default registry ignored configured directories %v: %w", configDirs, err)
+	}
+	leftoverDevices := slices.Clone(deviceSpecs)
+	if err := cdi.Refresh(); err != nil {
+		logrus.Warnf("CDI default registry refresh: %v", err)
 	} else {
-		leftoverDevices, err = registry.InjectDevices(spec, deviceSpecs...)
+		leftoverDevices, err = cdi.InjectDevices(spec, qualifiedDeviceSpecs...)
 		if err != nil {
-			logrus.Debugf("CDI device injection: %v, unresolved list %v", err, leftoverDevices)
+			return nil, fmt.Errorf("CDI device injection (leftover devices: %v): %w", leftoverDevices, err)
 		}
 	}
 	removed := slices.DeleteFunc(slices.Clone(deviceSpecs), func(t string) bool { return slices.Contains(leftoverDevices, t) })
-	logrus.Debugf("CDI taking care of devices %v, leaving devices %v", removed, leftoverDevices)
-	return leftoverDevices, nil
+	logrus.Debugf("CDI taking care of devices %v, leaving devices %v, skipped %v", removed, leftoverDevices, unqualifiedDeviceSpecs)
+	return append(leftoverDevices, unqualifiedDeviceSpecs...), nil
 }

 // Extract the device list so that we can still try to make it work if
diff --git a/vendor/github.com/containers/common/internal/deepcopy.go b/vendor/github.com/containers/common/internal/deepcopy.go
index 157f6ee4ce..38c41d7b1e 100644
--- a/vendor/github.com/containers/common/internal/deepcopy.go
+++ b/vendor/github.com/containers/common/internal/deepcopy.go
@@ -1,9 +1,10 @@
 package internal

 import (
+	"slices"
+
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 )

 // DeepCopyDescriptor copies a Descriptor, deeply copying its contents
diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go
index 14d00685cb..e52d5469ba 100644
--- a/vendor/github.com/containers/common/libimage/manifest_list.go
+++ b/vendor/github.com/containers/common/libimage/manifest_list.go
@@ -6,6 +6,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"time"

 	"github.com/containers/common/libimage/define"
@@ -20,7 +21,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 )

 // NOTE: the abstractions and APIs here are a first step to further merge
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index 70410508ef..64f06350f0 100644
--- a/vendor/github.com/containers/common/libimage/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -40,7 +41,6 @@ import (
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) const ( diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index e872b1b80f..3db1b2992b 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -115,7 +115,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP // off and entirely ignored. The digest is the sole source of truth. normalizedName, _, normalizeError := normalizeTaggedDigestedString(name) if normalizeError != nil { - return nil, normalizeError + return nil, fmt.Errorf(`parsing reference %q: %w`, name, normalizeError) } name = normalizedName diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 632f0fccf1..1baf41d5d2 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -250,7 +250,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, // off and entirely ignored. The digest is the sole source of truth. normalizedName, possiblyUnqualifiedNamedReference, err := normalizeTaggedDigestedString(name) if err != nil { - return nil, "", err + return nil, "", fmt.Errorf(`parsing reference %q: %w`, name, err) } name = normalizedName diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go index ffadae0d2e..15fd1bf4e3 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -9,6 +9,7 @@ import ( "net" "os" "path/filepath" + "slices" "strconv" "strings" "time" @@ -18,7 +19,6 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go index 71b7872f1f..43f3bfef46 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -7,11 +7,11 @@ import ( "fmt" "net" "os" + "slices" internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) func (n *cniNetwork) NetworkUpdate(_ string, _ types.NetworkUpdateOptions) error { diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go index 6bdb34e64b..7ca62137b2 100644 --- a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -6,10 +6,10 @@ import ( "fmt" "io" "os" + "slices" "strings" "github.com/containers/common/pkg/config" - "golang.org/x/exp/slices" ) const ( diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go index c511a2df75..b75e9a57f4 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go @@ -3,11 +3,11 @@ package util import ( 
"fmt" "net" + "slices" "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" "github.com/containers/common/pkg/config" - "golang.org/x/exp/slices" ) func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error { diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go index 8405bffd91..96c21e76f6 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/util.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -4,11 +4,11 @@ import ( "errors" "fmt" "net" + "slices" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // GetBridgeInterfaceNames returns all bridge interface names diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index 3a77d3ab2f..99fba5a0a8 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -10,13 +10,13 @@ import ( "os" "path/filepath" "reflect" + "slices" "strconv" "time" internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/storage/pkg/stringid" - "golang.org/x/exp/slices" ) func sliceRemoveDuplicates(strList []string) []string { diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go index d13e51ff81..9a04120b59 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/run.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -5,13 +5,13 @@ package netavark import ( "encoding/json" "fmt" + "slices" "strconv" "strings" "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) type netavarkOptions struct { diff --git a/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go b/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go index 4b31320b5c..2e6dc7d59f 100644 --- a/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go +++ b/vendor/github.com/containers/common/libnetwork/pasta/pasta_linux.go @@ -15,6 +15,7 @@ import ( "fmt" "net" "os/exec" + "slices" "strings" "github.com/containernetworking/plugins/pkg/ns" @@ -54,100 +55,16 @@ func Setup(opts *SetupOptions) error { // Note that there is no need for any special cleanup logic, the pasta // process will automatically exit when the netns path is deleted. 
func Setup2(opts *SetupOptions) (*SetupResult, error) { - NoTCPInitPorts := true - NoUDPInitPorts := true - NoTCPNamespacePorts := true - NoUDPNamespacePorts := true - NoMapGW := true - path, err := opts.Config.FindHelperBinary(BinaryName, true) if err != nil { return nil, fmt.Errorf("could not find pasta, the network namespace can't be configured: %w", err) } - cmdArgs := []string{} - cmdArgs = append(cmdArgs, "--config-net") - - for _, i := range opts.Ports { - protocols := strings.Split(i.Protocol, ",") - for _, protocol := range protocols { - var addr string - - if i.HostIP != "" { - addr = i.HostIP + "/" - } - - switch protocol { - case "tcp": - cmdArgs = append(cmdArgs, "-t") - case "udp": - cmdArgs = append(cmdArgs, "-u") - default: - return nil, fmt.Errorf("can't forward protocol: %s", protocol) - } - - arg := fmt.Sprintf("%s%d-%d:%d-%d", addr, - i.HostPort, - i.HostPort+i.Range-1, - i.ContainerPort, - i.ContainerPort+i.Range-1) - cmdArgs = append(cmdArgs, arg) - } - } - - // first append options set in the config - cmdArgs = append(cmdArgs, opts.Config.Network.PastaOptions.Get()...) - // then append the ones that were set on the cli - cmdArgs = append(cmdArgs, opts.ExtraOptions...) - - var dnsForwardIPs []string - for i, opt := range cmdArgs { - switch opt { - case "-t", "--tcp-ports": - NoTCPInitPorts = false - case "-u", "--udp-ports": - NoUDPInitPorts = false - case "-T", "--tcp-ns": - NoTCPNamespacePorts = false - case "-U", "--udp-ns": - NoUDPNamespacePorts = false - case "--map-gw": - NoMapGW = false - // not an actual pasta(1) option - cmdArgs = append(cmdArgs[:i], cmdArgs[i+1:]...) - case dnsForwardOpt: - // if there is no arg after it pasta will likely error out anyway due invalid cli args - if len(cmdArgs) > i+1 { - dnsForwardIPs = append(dnsForwardIPs, cmdArgs[i+1]) - } - } - } - - if len(dnsForwardIPs) == 0 { - // the user did not request custom --dns-forward so add our own. - cmdArgs = append(cmdArgs, dnsForwardOpt, dnsForwardIpv4) - dnsForwardIPs = append(dnsForwardIPs, dnsForwardIpv4) - } - - if NoTCPInitPorts { - cmdArgs = append(cmdArgs, "-t", "none") - } - if NoUDPInitPorts { - cmdArgs = append(cmdArgs, "-u", "none") - } - if NoTCPNamespacePorts { - cmdArgs = append(cmdArgs, "-T", "none") - } - if NoUDPNamespacePorts { - cmdArgs = append(cmdArgs, "-U", "none") - } - if NoMapGW { - cmdArgs = append(cmdArgs, "--no-map-gw") + cmdArgs, dnsForwardIPs, err := createPastaArgs(opts) + if err != nil { + return nil, err } - // always pass --quiet to silence the info output from pasta - cmdArgs = append(cmdArgs, "--quiet", "--netns", opts.Netns) - logrus.Debugf("pasta arguments: %s", strings.Join(cmdArgs, " ")) // pasta forks once ready, and quits once we delete the target namespace @@ -205,3 +122,103 @@ func Setup2(opts *SetupOptions) (*SetupResult, error) { return result, nil } + +// createPastaArgs creates the pasta arguments; it returns the args to be passed to pasta(1) and, as the second return value, the dns forward ips used. +func createPastaArgs(opts *SetupOptions) ([]string, []string, error) { + noTCPInitPorts := true + noUDPInitPorts := true + noTCPNamespacePorts := true + noUDPNamespacePorts := true + noMapGW := true + + cmdArgs := []string{"--config-net"} + // first append options set in the config + cmdArgs = append(cmdArgs, opts.Config.Network.PastaOptions.Get()...) + // then append the ones that were set on the cli + cmdArgs = append(cmdArgs, opts.ExtraOptions...) + + cmdArgs = slices.DeleteFunc(cmdArgs, func(s string) bool { + // --map-gw is not a real pasta(1) option so we must remove it + // and not add --no-map-gw below + if s == "--map-gw" { + noMapGW = false + return true + } + return false + }) + + var dnsForwardIPs []string + for i, opt := range cmdArgs { + switch opt { + case "-t", "--tcp-ports": + noTCPInitPorts = false + case "-u", "--udp-ports": + noUDPInitPorts = false + case "-T", "--tcp-ns": + noTCPNamespacePorts = false + case "-U", "--udp-ns": + noUDPNamespacePorts = false + case dnsForwardOpt: + // if there is no arg after it pasta will likely error out anyway due to invalid cli args + if len(cmdArgs) > i+1 { + dnsForwardIPs = append(dnsForwardIPs, cmdArgs[i+1]) + } + } + } + + for _, i := range opts.Ports { + protocols := strings.Split(i.Protocol, ",") + for _, protocol := range protocols { + var addr string + + if i.HostIP != "" { + addr = i.HostIP + "/" + } + + switch protocol { + case "tcp": + noTCPInitPorts = false + cmdArgs = append(cmdArgs, "-t") + case "udp": + noUDPInitPorts = false + cmdArgs = append(cmdArgs, "-u") + default: + return nil, nil, fmt.Errorf("can't forward protocol: %s", protocol) + } + + arg := fmt.Sprintf("%s%d-%d:%d-%d", addr, + i.HostPort, + i.HostPort+i.Range-1, + i.ContainerPort, + i.ContainerPort+i.Range-1) + cmdArgs = append(cmdArgs, arg) + } + } + + if len(dnsForwardIPs) == 0 { + // the user did not request custom --dns-forward so add our own. + cmdArgs = append(cmdArgs, dnsForwardOpt, dnsForwardIpv4) + dnsForwardIPs = append(dnsForwardIPs, dnsForwardIpv4) + } + + if noTCPInitPorts { + cmdArgs = append(cmdArgs, "-t", "none") + } + if noUDPInitPorts { + cmdArgs = append(cmdArgs, "-u", "none") + } + if noTCPNamespacePorts { + cmdArgs = append(cmdArgs, "-T", "none") + } + if noUDPNamespacePorts { + cmdArgs = append(cmdArgs, "-U", "none") + } + if noMapGW { + cmdArgs = append(cmdArgs, "--no-map-gw") + } + + // always pass --quiet to silence the info output from pasta + cmdArgs = append(cmdArgs, "--quiet", "--netns", opts.Netns) + + return cmdArgs, dnsForwardIPs, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go index 0d2b69d259..30b82d8072 100644 --- a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go @@ -5,12 +5,12 @@ import ( "fmt" "os" "path/filepath" + "slices" "strings" "github.com/containers/storage/pkg/fileutils" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) const ( diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go index faea9c1ad5..41281e7488 100644 --- a/vendor/github.com/containers/common/libnetwork/util/filters.go +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -2,12 +2,12 @@ package util import ( "fmt" + "slices" "strings" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/util" - "golang.org/x/exp/slices" ) func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go index e0b0ac95e8..43fd2c1b58 100644 --- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go +++
b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -8,12 +8,12 @@ package capabilities import ( "errors" "fmt" + "slices" "sort" "strings" "sync" "github.com/syndtr/gocapability/capability" - "golang.org/x/exp/slices" ) var ( diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index c0e8319ea5..9a3765c263 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -7,6 +7,7 @@ import ( "os/exec" "path/filepath" "runtime" + "slices" "strings" "github.com/containers/common/internal/attributedstring" @@ -17,13 +18,9 @@ import ( units "github.com/docker/go-units" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) const ( - // _configPath is the path to the containers/containers.conf - // inside a given config directory. - _configPath = "containers/containers.conf" // UserOverrideContainersConfig holds the containers config path overridden by the rootless user UserOverrideContainersConfig = ".config/" + _configPath // Token prefix for looking for helper binary under $BINDIR @@ -1098,10 +1095,10 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) return exec.LookPath(name) } configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." - if len(c.Engine.HelperBinariesDir.Get()) == 0 { + if len(dirList) == 0 { return "", fmt.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } - return "", fmt.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint) + return "", fmt.Errorf("could not find %q in one of %v. %s", name, dirList, configHint) } // ImageCopyTmpDir default directory to store temporary image files during copy diff --git a/vendor/github.com/containers/common/pkg/config/config_unix.go b/vendor/github.com/containers/common/pkg/config/config_unix.go index bd1652787c..7de35062da 100644 --- a/vendor/github.com/containers/common/pkg/config/config_unix.go +++ b/vendor/github.com/containers/common/pkg/config/config_unix.go @@ -9,6 +9,10 @@ import ( "github.com/containers/storage/pkg/unshare" ) +// _configPath is the path to the containers/containers.conf +// inside a given config directory. +const _configPath = "containers/containers.conf" + // userConfigPath returns the path to the users local config that is // not shared with other users. It uses $XDG_CONFIG_HOME/containers... // if set or $HOME/.config/containers... if not. 
@@ -23,3 +27,9 @@ func userConfigPath() (string, error) { return filepath.Join(home, UserOverrideContainersConfig), nil } + +// overrideContainersConfigPath returns the default config path overridden +// by the root user +func overrideContainersConfigPath() (string, error) { + return OverrideContainersConfig, nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go index 9011687e4d..fbe1bb3f12 100644 --- a/vendor/github.com/containers/common/pkg/config/config_windows.go +++ b/vendor/github.com/containers/common/pkg/config/config_windows.go @@ -3,11 +3,12 @@ package config import "os" const ( - // OverrideContainersConfig holds the default config path overridden by the root user - OverrideContainersConfig = "/etc/" + _configPath + // _configPath is the path to the containers/containers.conf + // inside a given config directory. + _configPath = "containers\\containers.conf" // DefaultContainersConfig holds the default containers config path - DefaultContainersConfig = "/usr/share/" + _configPath + DefaultContainersConfig = "" // DefaultSignaturePolicyPath is the default value for the // policy.json file. @@ -20,7 +21,13 @@ const ( // userConfigPath returns the path to the users local config that is // not shared with other users. It uses $APPDATA/containers... func userConfigPath() (string, error) { - return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil + return os.Getenv("APPDATA") + _configPath, nil +} + +// overrideContainersConfigPath returns the path to the system wide +// containers config folder. It uses $PROGRAMDATA/containers... +func overrideContainersConfigPath() (string, error) { + return os.Getenv("ProgramData") + _configPath, nil } var defaultHelperBinariesDir = []string{ diff --git a/vendor/github.com/containers/common/pkg/config/new.go b/vendor/github.com/containers/common/pkg/config/new.go index 51da47db1f..407a685df1 100644 --- a/vendor/github.com/containers/common/pkg/config/new.go +++ b/vendor/github.com/containers/common/pkg/config/new.go @@ -158,16 +158,22 @@ func systemConfigs() (configs []string, finalErr error) { } return append(configs, path), nil } + configs = append(configs, DefaultContainersConfig) - configs = append(configs, OverrideContainersConfig) var err error - configs, err = addConfigs(OverrideContainersConfig+".d", configs) + path, err := overrideContainersConfigPath() + if err != nil { + return nil, err + } + configs = append(configs, path) + + configs, err = addConfigs(path+".d", configs) if err != nil { return nil, err } - path, err := userConfigPath() + path, err = userConfigPath() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index 247846a74b..58ed0abf7e 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "slices" "strings" "github.com/containers/common/internal" @@ -12,7 +13,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) // List is a generic interface for manipulating a manifest list or an image diff --git a/vendor/github.com/containers/common/pkg/secrets/define/secrets.go
b/vendor/github.com/containers/common/pkg/secrets/define/secrets.go new file mode 100644 index 0000000000..6fb8c10202 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/secrets/define/secrets.go @@ -0,0 +1,16 @@ +package define + +import ( + "errors" +) + +var ( + // ErrNoSuchSecret indicates that the secret does not exist + ErrNoSuchSecret = errors.New("no such secret") + + // ErrSecretIDExists indicates that there is secret data already associated with an id + ErrSecretIDExists = errors.New("secret data with ID already exists") + + // ErrInvalidKey indicates that something about your key is wrong + ErrInvalidKey = errors.New("invalid key") +) diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 48817d0e62..54df4a69e3 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -9,6 +9,7 @@ import ( "path/filepath" "sort" + "github.com/containers/common/pkg/secrets/define" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/lockfile" "golang.org/x/exp/maps" @@ -17,12 +18,6 @@ import ( // secretsDataFile is the file where secrets data/payload will be stored var secretsDataFile = "secretsdata.json" -// errNoSecretData indicates that there is not data associated with an id -var errNoSecretData = errors.New("no secret data with ID") - -// errNoSecretData indicates that there is secret data already associated with an id -var errSecretIDExists = errors.New("secret data with ID already exists") - // Driver is the filedriver object type Driver struct { // secretsDataFilePath is the path to the secretsfile @@ -75,7 +70,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) { if data, ok := secretData[id]; ok { return data, nil } - return nil, fmt.Errorf("%s: %w", id, errNoSecretData) + return nil, fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) } // Store stores the bytes associated with an ID. 
An error is returned if the ID already exists @@ -88,7 +83,7 @@ func (d *Driver) Store(id string, data []byte) error { return err } if _, ok := secretData[id]; ok { - return fmt.Errorf("%s: %w", id, errSecretIDExists) + return fmt.Errorf("%s: %w", id, define.ErrSecretIDExists) } secretData[id] = data marshalled, err := json.MarshalIndent(secretData, "", " ") @@ -113,7 +108,7 @@ func (d *Driver) Delete(id string) error { if _, ok := secretData[id]; ok { delete(secretData, id) } else { - return fmt.Errorf("%s: %w", id, errNoSecretData) + return fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) } marshalled, err := json.MarshalIndent(secretData, "", " ") if err != nil { diff --git a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go index d6a4f63a60..b99541faa5 100644 --- a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go @@ -12,20 +12,10 @@ import ( "sort" "strings" + "github.com/containers/common/pkg/secrets/define" "github.com/containers/storage/pkg/fileutils" ) -var ( - // errNoSecretData indicates that there is not data associated with an id - errNoSecretData = errors.New("no secret data with ID") - - // errNoSecretData indicates that there is secret data already associated with an id - errSecretIDExists = errors.New("secret data with ID already exists") - - // errInvalidKey indicates that something about your key is wrong - errInvalidKey = errors.New("invalid key") -) - type driverConfig struct { // Root contains the root directory where the secrets are stored Root string @@ -128,10 +118,10 @@ func (d *Driver) Lookup(id string) ([]byte, error) { return nil, err } if err := d.gpg(context.TODO(), nil, out, "--decrypt", key); err != nil { - return nil, fmt.Errorf("%s: %w", id, errNoSecretData) + return nil, fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) } if out.Len() == 0 { - return nil, fmt.Errorf("%s: %w", id, errNoSecretData) + return nil, fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) } return out.Bytes(), nil } @@ -139,7 +129,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) { // Store saves the bytes associated with an ID. 
An error is returned if the ID already exists func (d *Driver) Store(id string, data []byte) error { if _, err := d.Lookup(id); err == nil { - return fmt.Errorf("%s: %w", id, errSecretIDExists) + return fmt.Errorf("%s: %w", id, define.ErrSecretIDExists) } in := bytes.NewReader(data) key, err := d.getPath(id) @@ -156,7 +146,10 @@ func (d *Driver) Delete(id string) error { return err } if err := os.Remove(key); err != nil { - return fmt.Errorf("%s: %w", id, errNoSecretData) + if errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) + } + return fmt.Errorf("%s: %w", id, err) } return nil } @@ -176,10 +169,10 @@ func (d *Driver) gpg(ctx context.Context, in io.Reader, out io.Writer, args ...s func (d *Driver) getPath(id string) (string, error) { path, err := filepath.Abs(filepath.Join(d.Root, id)) if err != nil { - return "", errInvalidKey + return "", define.ErrInvalidKey } if !strings.HasPrefix(path, d.Root) { - return "", errInvalidKey + return "", define.ErrInvalidKey } return path + ".gpg", nil } diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index 7092c8f2b2..8ffcc738bd 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -7,6 +7,7 @@ import ( "path/filepath" "time" + "github.com/containers/common/pkg/secrets/define" "github.com/containers/common/pkg/secrets/filedriver" "github.com/containers/common/pkg/secrets/passdriver" "github.com/containers/common/pkg/secrets/shelldriver" @@ -26,7 +27,7 @@ const secretIDLength = 25 var errInvalidPath = errors.New("invalid secrets path") // ErrNoSuchSecret indicates that the secret does not exist -var ErrNoSuchSecret = errors.New("no such secret") +var ErrNoSuchSecret = define.ErrNoSuchSecret // errSecretNameInUse indicates that the secret name is already in use var errSecretNameInUse = errors.New("secret name in use") @@ -151,7 +152,7 @@ func (s *SecretsManager) newID() (string, error) { newID = newID[0:secretIDLength] _, err := s.lookupSecret(newID) if err != nil { - if errors.Is(err, ErrNoSuchSecret) { + if errors.Is(err, define.ErrNoSuchSecret) { return newID, nil } return "", err @@ -217,12 +218,14 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti } if options.Replace { - if err := driver.Delete(secr.ID); err != nil { + if err := driver.Delete(secr.ID); err != nil && !errors.Is(err, define.ErrNoSuchSecret) { return "", fmt.Errorf("deleting secret %s: %w", secr.ID, err) } - if err := s.delete(secr.ID); err != nil { - return "", fmt.Errorf("deleting secret %s: %w", secr.ID, err) + if err == nil { + if err := s.delete(secr.ID); err != nil && !errors.Is(err, define.ErrNoSuchSecret) { + return "", fmt.Errorf("deleting secret %s: %w", secr.ID, err) + } } } diff --git a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go index cc86c4501e..fb8201f590 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go +++ b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go @@ -8,6 +8,8 @@ import ( "os" "strings" "time" + + "github.com/containers/common/pkg/secrets/define" ) type db struct { @@ -70,14 +72,14 @@ func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err err name, id, err = s.getExactNameAndID(nameOrID) if err == nil { return name, id, nil - } else if !errors.Is(err, ErrNoSuchSecret) { + } else if 
!errors.Is(err, define.ErrNoSuchSecret) { return "", "", err } // ID prefix may have been given, iterate through all IDs. // ID and partial ID has a max length of 25, so we return if its greater than that. if len(nameOrID) > secretIDLength { - return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret) + return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, define.ErrNoSuchSecret) } exists := false var foundID, foundName string @@ -95,7 +97,7 @@ func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err err if exists { return foundName, foundID, nil } - return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret) + return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, define.ErrNoSuchSecret) } // getExactNameAndID takes a secret's name or ID and returns both its name and full ID. @@ -114,7 +116,7 @@ func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, er return name, id, nil } - return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret) + return "", "", fmt.Errorf("no secret with name or id %q: %w", nameOrID, define.ErrNoSuchSecret) } // exactSecretExists checks if the secret exists, given a name or ID @@ -122,7 +124,7 @@ func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, er func (s *SecretsManager) exactSecretExists(nameOrID string) (bool, error) { _, _, err := s.getExactNameAndID(nameOrID) if err != nil { - if errors.Is(err, ErrNoSuchSecret) { + if errors.Is(err, define.ErrNoSuchSecret) { return false, nil } return false, err @@ -157,7 +159,7 @@ func (s *SecretsManager) lookupSecret(nameOrID string) (*Secret, error) { return &secret, nil } - return nil, fmt.Errorf("no secret with name or id %q: %w", nameOrID, ErrNoSuchSecret) + return nil, fmt.Errorf("no secret with name or id %q: %w", nameOrID, define.ErrNoSuchSecret) } // Store creates a new secret in the secrets database. diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go index c903ca7491..262ee5cfc5 100644 --- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go @@ -9,20 +9,13 @@ import ( "os/exec" "sort" "strings" -) - -var ( - - // errMissingConfig indicates that one or more of the external actions are not configured - errMissingConfig = errors.New("missing config value") - // errNoSecretData indicates that there is not data associated with an id - errNoSecretData = errors.New("no secret data with ID") - - // errInvalidKey indicates that something about your key is wrong - errInvalidKey = errors.New("invalid key") + "github.com/containers/common/pkg/secrets/define" ) +// errMissingConfig indicates that one or more of the external actions are not configured +var errMissingConfig = errors.New("missing config value") + type driverConfig struct { // DeleteCommand contains a shell command that deletes a secret. 
// The secret id is provided as environment variable SECRET_ID @@ -111,7 +104,7 @@ func (d *Driver) List() (secrets []string, err error) { // Lookup returns the bytes associated with a secret ID func (d *Driver) Lookup(id string) ([]byte, error) { if strings.Contains(id, "..") { - return nil, errInvalidKey + return nil, define.ErrInvalidKey } cmd := exec.CommandContext(context.TODO(), "/bin/sh", "-c", d.LookupCommand) @@ -124,7 +117,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) { err := cmd.Run() if err != nil { - return nil, fmt.Errorf("%s: %w", id, errNoSecretData) + return nil, fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) } return buf.Bytes(), nil } @@ -132,7 +125,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) { // Store saves the bytes associated with an ID. An error is returned if the ID already exists func (d *Driver) Store(id string, data []byte) error { if strings.Contains(id, "..") { - return errInvalidKey + return define.ErrInvalidKey } cmd := exec.CommandContext(context.TODO(), "/bin/sh", "-c", d.StoreCommand) @@ -149,7 +142,7 @@ func (d *Driver) Store(id string, data []byte) error { // Delete removes the secret associated with the specified ID. An error is returned if no matching secret is found. func (d *Driver) Delete(id string) error { if strings.Contains(id, "..") { - return errInvalidKey + return define.ErrInvalidKey } cmd := exec.CommandContext(context.TODO(), "/bin/sh", "-c", d.DeleteCommand) @@ -161,7 +154,7 @@ func (d *Driver) Delete(id string) error { err := cmd.Run() if err != nil { - return fmt.Errorf("%s: %w", id, errNoSecretData) + return fmt.Errorf("%s: %w", id, define.ErrNoSuchSecret) } return nil diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index c21a6aa522..69e24ad764 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "slices" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/image" @@ -15,7 +16,6 @@ import ( multierror "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // supplementedImageReference groups multiple references together. diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 5a30107c78..1e51670189 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -5,12 +5,12 @@ import ( "os" "path/filepath" "regexp" + "slices" "time" "github.com/containers/storage/pkg/fileutils" "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // StringInSlice determines if a string is in a string slice, returns bool. 
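One detail of the secrets refactor above deserves a callout: the file, pass, and shell drivers used to declare their own private sentinels (errNoSecretData, errInvalidKey), so callers had no portable way to test for "not found". Now that all three wrap the shared sentinels from the new define package, errors.Is behaves the same regardless of driver. A minimal caller-side sketch; the lookupper interface and lookupOrDefault helper are illustrative, not part of the vendored API:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/containers/common/pkg/secrets/define"
)

// lookupper is an illustrative stand-in for the shape shared by the
// file, pass, and shell drivers vendored above.
type lookupper interface {
	Lookup(id string) ([]byte, error)
}

// lookupOrDefault treats "no such secret" identically for every
// driver, which is exactly what the shared sentinel enables.
func lookupOrDefault(d lookupper, id string, def []byte) ([]byte, error) {
	data, err := d.Lookup(id)
	if errors.Is(err, define.ErrNoSuchSecret) {
		return def, nil
	}
	if err != nil {
		return nil, fmt.Errorf("looking up secret %s: %w", id, err)
	}
	return data, nil
}
```

The same property is what lets the secrets.go hunk above tolerate a Replace against a secret whose payload is already gone.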
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 89c3440bef..52556304f6 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "maps" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/manifest" @@ -12,7 +13,6 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) var ( diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index ad1453fcbc..996a4e2d7a 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "slices" "time" "github.com/containers/image/v5/docker/reference" @@ -25,7 +26,6 @@ import ( encconfig "github.com/containers/ocicrypt/config" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sync/semaphore" "golang.org/x/term" ) diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 1305676d7a..523db5fa0b 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -2,13 +2,13 @@ package copy import ( "fmt" + "maps" + "slices" "strings" "github.com/containers/image/v5/types" "github.com/containers/ocicrypt" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // isOciEncrypted returns a bool indicating if a mediatype is encrypted diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index 60ea92aae1..12ccbad98b 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strings" internalManifest "github.com/containers/image/v5/internal/manifest" @@ -13,7 +14,6 @@ import ( "github.com/containers/image/v5/types" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. 
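Most hunks in this stretch are one mechanical change: golang.org/x/exp/slices and golang.org/x/exp/maps are swapped for the stdlib slices and maps packages introduced in Go 1.21, with no call-site changes because the stdlib versions kept the x/exp signatures. A standalone sanity check of that equivalence (example values only, not code from this patch):

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	args := []string{"a", "b", "--map-gw", "c"}
	// slices.Clone and slices.DeleteFunc behave exactly like their
	// golang.org/x/exp/slices counterparts.
	kept := slices.DeleteFunc(slices.Clone(args), func(s string) bool {
		return s == "--map-gw"
	})
	fmt.Println(kept, slices.Contains(kept, "b")) // [a b c] true

	anns := map[string]string{"org.opencontainers.image.title": "demo"}
	fmt.Println(maps.Clone(anns)) // same behavior as x/exp/maps.Clone
}
```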
diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index a219b58b6d..009a067ced 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -5,6 +5,8 @@ import ( "context" "errors" "fmt" + "maps" + "slices" "sort" "strings" @@ -17,8 +19,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) type instanceCopyKind int diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index e1a43f75e1..796a550579 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "reflect" + "slices" "strings" "sync" @@ -25,7 +26,6 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v8" - "golang.org/x/exp/slices" ) // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -704,7 +704,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType) logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v", srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression) - canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType) + canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression var requiredCompression *compressiontypes.Algorithm if ic.requireCompressionFormatMatch { diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 11b42c6e00..0a0064576a 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -21,10 +21,10 @@ import ( "fmt" "io" "net/http" + "slices" "github.com/docker/distribution/registry/api/errcode" dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" - "golang.org/x/exp/slices" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 932fb971ad..f62ca5f2a2 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -599,10 +599,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri // close response body before retry or context done res.Body.Close() - delay = parseRetryAfter(res, delay) - if delay > backoffMaxDelay { - delay = backoffMaxDelay - } + delay = min(parseRetryAfter(res, delay), backoffMaxDelay) logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds()) select { case <-ctx.Done(): diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 9ba7ff4c47..f5816bee72 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go 
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -8,10 +8,12 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/url" "os" "path/filepath" + "slices" "strings" "github.com/containers/image/v5/docker/reference" @@ -34,8 +36,6 @@ import ( "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) type dockerImageDestination struct { diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index df7b2c0906..dd82e9b19c 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -9,6 +9,7 @@ import ( "io" "os" "path/filepath" + "slices" "sync" "time" @@ -19,7 +20,6 @@ import ( "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // Writer allows creating a (docker save)-formatted tar archive containing one or more images. diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index df0e8e4171..aaef95ff3d 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" @@ -15,7 +16,6 @@ import ( ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) type manifestOCI1 struct { diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 7ce5bb0696..c6e935d0a1 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -3,13 +3,13 @@ package manifest import ( "encoding/json" "fmt" + "slices" platform "github.com/containers/image/v5/internal/pkg/platform" compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) // Schema2PlatformSpec describes the platform which a particular manifest is diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index 3556902e4b..ee0ddc772a 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -2,12 +2,12 @@ package manifest import ( "encoding/json" + "slices" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) // FIXME: Should we just use docker/distribution and docker/docker implementations directly? 
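One simplification above is not a plain import swap: docker_client.go replaces its manual clamp of the retry delay with the generic min built-in from Go 1.21, which works for any ordered type, time.Duration included. A quick illustration of the same pattern with made-up values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const backoffMaxDelay = 2 * time.Second
	// Pretend a Retry-After header asked for ten seconds; the built-in
	// min clamps it just like the removed if-statement did.
	parsed := 10 * time.Second
	fmt.Println(min(parsed, backoffMaxDelay)) // 2s
}
```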
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 829852a836..07441c06e4 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -3,8 +3,10 @@ package manifest import ( "encoding/json" "fmt" + "maps" "math" "runtime" + "slices" platform "github.com/containers/image/v5/internal/pkg/platform" compression "github.com/containers/image/v5/pkg/compression/types" @@ -12,8 +14,6 @@ import ( "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) const ( diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 94002d6d47..912af4d90e 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -21,12 +21,12 @@ import ( "fmt" "os" "runtime" + "slices" "strings" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // For Linux, the kernel has already detected the ABI, ISA and Features. diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go index 6eaeb68d19..8025cd2700 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go +++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go @@ -3,8 +3,7 @@ package signature import ( "bytes" "encoding/json" - - "golang.org/x/exp/maps" + "maps" ) const ( diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 762815570c..4e520f1fd0 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "strings" "time" @@ -15,7 +16,6 @@ import ( "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 51943ed1b0..cc68ef7653 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -3,6 +3,7 @@ package manifest import ( "encoding/json" "fmt" + "slices" "strings" "github.com/containers/image/v5/internal/manifest" @@ -12,7 +13,6 @@ import ( "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. 
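Two more Go 1.21 features appear a little further down, in the blobinfocache prioritize.go hunk: cmp.Compare replaces the hand-rolled three-way switch over digest strings, and the package-local min helper disappears because the built-in accepts any number of arguments. In isolation (illustrative values, not the vendored code):

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	// cmp.Compare returns -1, 0, or 1, matching the ordering the old
	// switch statement produced by hand.
	fmt.Println(cmp.Compare("sha256:aaa", "sha256:bbb")) // -1

	// The built-in min is variadic over its arguments, so the nested
	// min(min(a, b), c) calls collapse into one expression.
	fmt.Println(min(3, 7, 5)) // 3
}
```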
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go index 8dd54f255a..bcf257df67 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -6,13 +6,13 @@ import ( "fmt" "io/fs" "os" + "slices" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // DeleteImage deletes the named image from the directory, if supported. diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index e46c67b516..a096afe0f1 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" @@ -21,7 +22,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) type ociImageDestination struct { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 5cce06806f..04d55ac11a 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -14,6 +14,7 @@ import ( "path" "path/filepath" "reflect" + "slices" "strings" "time" @@ -21,7 +22,6 @@ import ( "github.com/containers/image/v5/internal/multierr" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "gopkg.in/yaml.v3" ) diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 656f4518d1..4170d6e208 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net/http" + "slices" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" @@ -21,7 +22,6 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) type openshiftImageDestination struct { diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index 663df11dd9..fb2efe4c4b 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -79,47 +79,55 @@ func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { // and this new file. func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { defer wg.Done() - // Decompress from and digest the reading end of that pipe. 
- decompressed, err3 := archive.DecompressStream(decompressReader) + defer decompressReader.Close() + + succeeded := false + defer func() { + if !succeeded { + // Remove the temporary file. + if err := os.Remove(tempFile.Name()); err != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err) + } + } + }() + digester := digest.Canonical.Digester() - if err3 == nil { + if err := func() error { // A scope for defer + defer tempFile.Close() + + // Decompress from and digest the reading end of that pipe. + decompressed, err := archive.DecompressStream(decompressReader) + if err != nil { + // Drain the pipe to keep from stalling the PutBlob() thread. + if _, err2 := io.Copy(io.Discard, decompressReader); err2 != nil { + logrus.Debugf("error draining the pipe: %v", err2) + } + return err + } + defer decompressed.Close() // Read the decompressed data through the filter over the pipe, blocking until the // writing end is closed. - _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) - } else { - // Drain the pipe to keep from stalling the PutBlob() thread. - if _, err := io.Copy(io.Discard, decompressReader); err != nil { - logrus.Debugf("error draining the pipe: %v", err) - } + _, err = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + return err + }(); err != nil { + return } - decompressReader.Close() - decompressed.Close() - tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig) - if err3 == nil { - // Rename the temporary file. - if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { - logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } else { - *alternateDigest = digester.Digest() - // Note the relationship between the two files. - if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { - logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) - } - if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { - logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) - } - } - } else { - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } + // Rename the temporary file. + if err := os.Rename(tempFile.Name(), decompressedFilename); err != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err) + return + } + succeeded = true + *alternateDigest = digester.Digest() + // Note the relationship between the two files. 
+ if err := ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err) + } + if err := ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err) } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 97937ccc6f..9cd9c8f7d3 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -3,6 +3,8 @@ package prioritize import ( + "cmp" + "slices" "time" "github.com/containers/image/v5/internal/blobinfocache" @@ -11,7 +13,6 @@ import ( "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, @@ -118,22 +119,7 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { return -cmp } // Fall back to digest, if timestamps end up _exactly_ the same (how?!) - // FIXME: Use cmp.Compare after we update to Go 1.21. - switch { - case xi.Candidate.Digest < xj.Candidate.Digest: - return -1 - case xi.Candidate.Digest > xj.Candidate.Digest: - return 1 - default: - return 0 - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b + return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest) } // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the @@ -161,7 +147,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit) remainingCapacity := totalLimit - knownLocationCandidatesUsed - unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates))) + unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates)) res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) for i := 0; i < knownLocationCandidatesUsed; i++ { res[i] = knownLocationCandidates[i].Candidate diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index 17e30bc7b9..61490932e5 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "slices" "strings" "github.com/containers/image/v5/docker/reference" @@ -12,7 +13,6 @@ import ( "github.com/containers/image/v5/types" "github.com/manifoldco/promptui" "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" "golang.org/x/term" ) diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 13f6dfd7fd..71f5bc8378 100644 --- 
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
index 17e30bc7b9..61490932e5 100644
--- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"slices"
 	"strings"
 
 	"github.com/containers/image/v5/docker/reference"
@@ -12,7 +13,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/manifoldco/promptui"
 	"github.com/opencontainers/go-digest"
-	"golang.org/x/exp/slices"
 	"golang.org/x/term"
 )
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
index 13f6dfd7fd..71f5bc8378 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
@@ -2,6 +2,7 @@ package sysregistriesv2
 
 import (
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -15,7 +16,6 @@ import (
 	"github.com/containers/storage/pkg/homedir"
 	"github.com/containers/storage/pkg/lockfile"
 	"github.com/sirupsen/logrus"
-	"golang.org/x/exp/maps"
 )
 
 // defaultShortNameMode is the default mode of registries.conf files if the
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index 0bc73a36f6..f6c0576e07 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -8,11 +8,11 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"slices"
 	"strings"
 	"time"
 
 	"github.com/sirupsen/logrus"
-	"golang.org/x/exp/slices"
 )
 
 // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index d6075f8110..a116ea30be 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -5,13 +5,13 @@ package signature
 import (
 	"errors"
 	"fmt"
+	"slices"
 	"strings"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/signature/internal"
 	"github.com/opencontainers/go-digest"
-	"golang.org/x/exp/slices"
 )
 
 // SignOptions includes optional parameters for signing container images.
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
index c11fa46a9d..9d43032b1a 100644
--- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
@@ -10,12 +10,12 @@ import (
 	"encoding/asn1"
 	"errors"
 	"fmt"
+	"slices"
 	"time"
 
 	"github.com/containers/image/v5/signature/internal"
 	"github.com/sigstore/fulcio/pkg/certificate"
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
-	"golang.org/x/exp/slices"
 )
 
 // fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
index 50243da334..e79c91cf99 100644
--- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
@@ -220,7 +220,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
 		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
 	}
 	// FIXME: Rekor 1.3.5 has added SHA-386 and SHA-512 as recognized values.
-	// Eventually we should support them as well; doing that cleanly would require updqating to Rekor 1.3.5, which requires Go 1.21.
+	// Eventually we should support them as well.
 	// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
 	// issue.
if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 { diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 5688678745..465644d17f 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -7,12 +7,12 @@ import ( "errors" "fmt" "os" + "slices" "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index b15c9c3b55..801f7f414a 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -12,6 +12,7 @@ import ( "io" "os" "path/filepath" + "slices" "sync" "sync/atomic" @@ -34,7 +35,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) var ( @@ -930,7 +930,6 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil. // Either way this function does not need the protection of s.lock. if s.manifest == nil { - logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex) return "", nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index a55e34054a..2c54763e08 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -6,6 +6,7 @@ package storage import ( "context" "fmt" + "slices" "strings" "github.com/containers/image/v5/docker/reference" @@ -15,7 +16,6 @@ import ( "github.com/containers/storage" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index d5578d9e8a..4e7ef3bcf8 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -3,6 +3,7 @@ package tarball import ( "context" "fmt" + "maps" "os" "strings" @@ -10,7 +11,6 @@ import ( "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" ) // ConfigUpdater is an interface that ImageReferences for "tarball" images also diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 6f9bfaf756..f2c1611446 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ 
b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "maps" "os" "runtime" "strings" @@ -18,7 +19,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" ) type tarballImageSource struct { diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 4e31427743..215a4b7bb8 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -23,7 +23,7 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20240320t153921z-f39f38d13" + IMAGE_SUFFIX: "c20240411t124913z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -126,7 +126,7 @@ lint_task: folder: $GOPATH/pkg/mod build_script: | apt-get update - apt-get install -y libbtrfs-dev libdevmapper-dev + apt-get install -y libbtrfs-dev test_script: | make TAGS=regex_precompile local-validate make lint @@ -171,7 +171,7 @@ vendor_task: cross_task: alias: cross container: - image: golang:1.20 + image: golang:1.21 build_script: make cross @@ -191,6 +191,6 @@ success_task: - vendor - cross container: - image: golang:1.20 + image: golang:1.21 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 8461c09019..7ee2642fc3 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -27,7 +27,7 @@ vendor-in-container NATIVETAGS := -AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libsubid_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO ?= go TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go deleted file mode 100644 index 388602b634..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ /dev/null @@ -1,254 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/sirupsen/logrus" -) - -type directLVMConfig struct { - Device string - ThinpPercent uint64 - ThinpMetaPercent uint64 - AutoExtendPercent uint64 - AutoExtendThreshold uint64 - MetaDataSize string -} - -var ( - errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") - errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") - errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm") -) - -func validateLVMConfig(cfg directLVMConfig) error { - if cfg.Device == "" { - return errMissingSetupDevice - } - if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { - return errThinpPercentMissing - } - - if 
cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { - return errThinpPercentTooBig - } - return nil -} - -func checkDevAvailable(dev string) error { - lvmScan, err := exec.LookPath("lvmdiskscan") - if err != nil { - logrus.Debugf("could not find lvmdiskscan: %v", err) - return nil - } - - out, err := exec.Command(lvmScan).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - if !bytes.Contains(out, []byte(dev)) { - return fmt.Errorf("%s is not available for use with devicemapper", dev) - } - return nil -} - -func checkDevInVG(dev string) error { - pvDisplay, err := exec.LookPath("pvdisplay") - if err != nil { - logrus.Debugf("could not find pvdisplay: %v", err) - return nil - } - - out, err := exec.Command(pvDisplay, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) - for scanner.Scan() { - fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") - if len(fields) > 1 { - // got "VG Name" line" - vg := strings.TrimSpace(fields[1]) - if len(vg) > 0 { - return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) - } - logrus.Error(fields) - break - } - } - return nil -} - -func checkDevHasFS(dev string) error { - blkid, err := exec.LookPath("blkid") - if err != nil { - logrus.Debugf("could not find blkid %v", err) - return nil - } - - out, err := exec.Command(blkid, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - fields := bytes.Fields(out) - for _, f := range fields { - kv := bytes.Split(f, []byte{'='}) - if bytes.Equal(kv[0], []byte("TYPE")) { - v := bytes.Trim(kv[1], "\"") - if len(v) > 0 { - return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) - } - return nil - } - } - return nil -} - -func verifyBlockDevice(dev string, force bool) error { - absPath, err := filepath.Abs(dev) - if err != nil { - return fmt.Errorf("unable to get absolute path for %s: %s", dev, err) - } - realPath, err := filepath.EvalSymlinks(absPath) - if err != nil { - return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err) - } - if err := checkDevAvailable(absPath); err != nil { - logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) - if err := checkDevAvailable(realPath); err != nil { - return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath) - } - } - if err := checkDevInVG(realPath); err != nil { - return err - } - - if force { - return nil - } - - if err := checkDevHasFS(realPath); err != nil { - return err - } - return nil -} - -func readLVMConfig(root string) (directLVMConfig, error) { - var cfg directLVMConfig - - p := filepath.Join(root, "setup-config.json") - b, err := os.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - return cfg, nil - } - return cfg, fmt.Errorf("reading existing setup config: %w", err) - } - - // check if this is just an empty file, no need to produce a json error later if so - if len(b) == 0 { - return cfg, nil - } - if err := json.Unmarshal(b, &cfg); err != nil { - return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err) - } - return cfg, nil -} - -func writeLVMConfig(root string, cfg directLVMConfig) error { - p := filepath.Join(root, "setup-config.json") - b, err := 
json.Marshal(cfg) - if err != nil { - return fmt.Errorf("marshalling direct lvm config: %w", err) - } - if err := os.WriteFile(p, b, 0o600); err != nil { - return fmt.Errorf("writing direct lvm config to file: %w", err) - } - return nil -} - -func setupDirectLVM(cfg directLVMConfig) error { - lvmProfileDir := "/etc/lvm/profile" - binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} - - for _, bin := range binaries { - if _, err := exec.LookPath(bin); err != nil { - return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err) - } - } - - err := os.MkdirAll(lvmProfileDir, 0o755) - if err != nil { - return fmt.Errorf("creating lvm profile directory: %w", err) - } - - if cfg.AutoExtendPercent == 0 { - cfg.AutoExtendPercent = 20 - } - - if cfg.AutoExtendThreshold == 0 { - cfg.AutoExtendThreshold = 80 - } - - if cfg.ThinpPercent == 0 { - cfg.ThinpPercent = 95 - } - if cfg.ThinpMetaPercent == 0 { - cfg.ThinpMetaPercent = 1 - } - if cfg.MetaDataSize == "" { - cfg.MetaDataSize = "128k" - } - - out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600) - if err != nil { - return fmt.Errorf("writing storage thinp autoextend profile: %w", err) - } - - out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() - if err != nil { - return fmt.Errorf("%s: %w", string(out), err) - } - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go deleted file mode 100644 index 6ded90f7f7..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ /dev/null @@ -1,2889 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/fs" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync" - "time" - - graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/dmesg" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/loopback" - "github.com/containers/storage/pkg/mount" - 
"github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" - units "github.com/docker/go-units" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - driverDeferredRemovalSupport = false - enableDeferredRemoval = false - enableDeferredDeletion = false - userBaseSize = false - defaultMinFreeSpacePercent uint32 = 10 - lvmSetupConfigForce bool -) - -const ( - deviceSetMetaFile = "deviceset-metadata" - transactionMetaFile = "transaction-metadata" - xfs = "xfs" - ext4 = "ext4" - base = "base" -) - -type transaction struct { - OpenTransactionID uint64 `json:"open_transaction_id"` - DeviceIDHash string `json:"device_hash"` - DeviceID int `json:"device_id"` -} - -type devInfo struct { - Hash string `json:"-"` - DeviceID int `json:"device_id"` - Size uint64 `json:"size"` - TransactionID uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - Deleted bool `json:"deleted"` - devices *DeviceSet - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. - // - // WARNING: In order to avoid AB-BA deadlocks when releasing - // the global lock while holding the per-device locks all - // device locks must be acquired *before* the device lock, and - // multiple device locks should be acquired parent before child. - lock sync.Mutex -} - -type metaData struct { - Devices map[string]*devInfo `json:"Devices"` -} - -// DeviceSet holds information about list of devices -type DeviceSet struct { - metaData `json:"-"` - sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper - root string - devicePrefix string - TransactionID uint64 `json:"-"` - NextDeviceID int `json:"next_device_id"` - deviceIDMap []byte - - // Options - dataLoopbackSize int64 - metaDataSize string - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - transaction `json:"-"` - overrideUdevSyncCheck bool - deferredRemove bool // use deferred removal - deferredDelete bool // use deferred deletion - BaseDeviceUUID string // save UUID of base device - BaseDeviceFilesystem string // save filesystem of base device - nrDeletedDevices uint // number of deleted devices - deletionWorkerTicker *time.Ticker - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - minFreeSpacePercent uint32 // min free space percentage in thinpool - xfsNospaceRetries string // max retries when xfs receives ENOSPC - lvmSetupConfig directLVMConfig -} - -// DiskUsage contains information about disk usage and is used when reporting Status of a device. -type DiskUsage struct { - // Used bytes on the disk. 
- Used uint64 - // Total bytes on the disk. - Total uint64 - // Available bytes on the disk. - Available uint64 -} - -// Status returns the information about the device. -type Status struct { - // PoolName is the name of the data pool. - PoolName string - // DataFile is the actual block device for data. - DataFile string - // DataLoopback loopback file, if used. - DataLoopback string - // MetadataFile is the actual block device for metadata. - MetadataFile string - // MetadataLoopback is the loopback file, if used. - MetadataLoopback string - // Data is the disk used for data. - Data DiskUsage - // Metadata is the disk used for meta data. - Metadata DiskUsage - // BaseDeviceSize is base size of container and image - BaseDeviceSize uint64 - // BaseDeviceFS is backing filesystem. - BaseDeviceFS string - // SectorSize size of the vector. - SectorSize uint64 - // UdevSyncSupported is true if sync is supported. - UdevSyncSupported bool - // DeferredRemoveEnabled is true then the device is not unmounted. - DeferredRemoveEnabled bool - // True if deferred deletion is enabled. This is different from - // deferred removal. "removal" means that device mapper device is - // deactivated. Thin device is still in thin pool and can be activated - // again. But "deletion" means that thin device will be deleted from - // thin pool and it can't be activated again. - DeferredDeleteEnabled bool - DeferredDeletedDeviceCount uint - MinFreeSpace uint64 -} - -// Structure used to export image/container metadata in inspect. -type deviceMetadata struct { - deviceID int - deviceSize uint64 // size in bytes - deviceName string // Device name as used during activation -} - -// DevStatus returns information about device mounted containing its id, size and sector information. -type DevStatus struct { - // DeviceID is the id of the device. - DeviceID int - // Size is the size of the filesystem. - Size uint64 - // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. - TransactionID uint64 - // SizeInSectors indicates the size of the sectors allocated. - SizeInSectors uint64 - // MappedSectors indicates number of mapped sectors. - MappedSectors uint64 - // HighestMappedSector is the pointer to the highest mapped sector. 
- HighestMappedSector uint64 -} - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *devInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = base - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *devInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) metadataDir() string { - return path.Join(devices.root, "metadata") -} - -func (devices *DeviceSet) metadataFile(info *devInfo) string { - file := info.Hash - if file == "" { - file = base - } - return path.Join(devices.metadataDir(), file) -} - -func (devices *DeviceSet) transactionMetaFile() string { - return path.Join(devices.metadataDir(), transactionMetaFile) -} - -func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), deviceSetMetaFile) -} - -func (devices *DeviceSet) oldMetadataFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - if devices.thinPoolDevice == "" { - return devices.devicePrefix + "-pool" - } - return devices.thinPoolDevice -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - err := fileutils.Exists(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists and new size is larger than its current size, it grows to the new size. -// Either way it returns the full path. -func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil { - return "", err - } - - if fi, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) - if err != nil { - return "", err - } - defer file.Close() - - if err := file.Truncate(size); err != nil { - return "", err - } - } else { - if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) - if err != nil { - return "", err - } - defer file.Close() - if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err) - } - } else if fi.Size() > size { - logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateTransactionID() uint64 { - devices.OpenTransactionID = devices.TransactionID + 1 - return devices.OpenTransactionID -} - -func (devices *DeviceSet) updatePoolTransactionID() error { - if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { - return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) - } - devices.TransactionID = devices.OpenTransactionID - return nil -} - -func (devices *DeviceSet) removeMetadata(info *devInfo) error { - if err := 
os.RemoveAll(devices.metadataFile(info)); err != nil { - return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) - } - return nil -} - -// Given json data and file path, write it to disk -func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp") - if err != nil { - return fmt.Errorf("devmapper: Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := os.Rename(tmpFile.Name(), filePath); err != nil { - return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - return nil -} - -func (devices *DeviceSet) saveMetadata(info *devInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { - var mask byte - i := deviceID % 8 - mask = 1 << uint(i) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask -} - -func (devices *DeviceSet) markDeviceIDFree(deviceID int) { - var mask byte - i := deviceID % 8 - mask = ^(1 << uint(i)) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask -} - -func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { - var mask byte - i := deviceID % 8 - mask = (1 << uint(i)) - return (devices.deviceIDMap[deviceID/8] & mask) == 0 -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { - info := devices.Devices[hash] - if info == nil { - info = devices.loadMetadata(hash) - if info == nil { - return nil, fmt.Errorf("devmapper: Unknown device %s", hash) - } - - devices.Devices[hash] = info - } - return info, nil -} - -func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) - return info, err -} - -// This function relies on that device hash map has been loaded in advance. -// Should be called with devices.Lock() held. -func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debug("devmapper: constructDeviceIDMap()") - defer logrus.Debug("devmapper: constructDeviceIDMap() END") - - for _, info := range devices.Devices { - devices.markDeviceIDUsed(info.DeviceID) - logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) - } -} - -func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { - // Skip some of the meta files which are not device files. 
- if strings.HasSuffix(name, ".migrated") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if strings.HasPrefix(name, ".") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if name == deviceSetMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if name == transactionMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - logrus.Debugf("devmapper: Loading data for file %s", path) - - // Include deleted devices also as cleanup delete device logic - // will go through it and see if there are any deleted devices. - if _, err := devices.lookupDevice(name); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err) - } - - return nil -} - -func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debug("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - - scan := func(path string, d fs.DirEntry, err error) error { - if err != nil { - logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err) - return nil - } - - // Skip any directories - if d.IsDir() { - return nil - } - - return devices.deviceFileWalkFunction(path, d.Name()) - } - - return filepath.WalkDir(devices.metadataDir(), scan) -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) unregisterDevice(hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v)", hash) - info := &devInfo{ - Hash: hash, - } - - delete(devices.Devices, hash) - - if err := devices.removeMetadata(info); err != nil { - logrus.Debugf("devmapper: Error removing metadata: %s", err) - return err - } - - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - Size: size, - TransactionID: transactionID, - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) - - if info.Deleted && !ignoreDeleted { - return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) - } - - // Make sure deferred removal on device is canceled, if one was - // scheduled. - if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { - return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) - } - - if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) -} - -// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error -func xfsSupported() error { - // Make sure mkfs.xfs is available - if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return err // error text is descriptive enough - } - - // Check if kernel supports xfs filesystem or not. 
- exec.Command("modprobe", xfs).Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return fmt.Errorf("checking for xfs support: %w", err) - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.HasSuffix(s.Text(), "\txfs") { - return nil - } - } - - if err := s.Err(); err != nil { - return fmt.Errorf("checking for xfs support: %w", err) - } - - return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) -} - -func determineDefaultFS() string { - err := xfsSupported() - if err == nil { - return xfs - } - - logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", ext4, err) - return ext4 -} - -// mkfsOptions tries to figure out whether some additional mkfs options are required -func mkfsOptions(fs string) []string { - if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) { - // For kernels earlier than 3.16 (and newer xfsutils), - // some xfs features need to be explicitly disabled. - return []string{"-m", "crc=0,finobt=0"} - } - - return []string{} -} - -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { - return err - } - - args := mkfsOptions(devices.filesystem) - args = append(args, devices.mkfsArgs...) - args = append(args, devname) - - logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) - defer func() { - if err != nil { - logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) - } else { - logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) - } - }() - - switch devices.filesystem { - case xfs: - err = exec.Command("mkfs.xfs", args...).Run() - case ext4: - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() - if err != nil { - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() - } - if err != nil { - return err - } - err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() - default: - err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) - } - return -} - -func (devices *DeviceSet) migrateOldMetaData() error { - // Migrate old metadata file - jsonData, err := os.ReadFile(devices.oldMetadataFile()) - if err != nil && !os.IsNotExist(err) { - return err - } - - if jsonData != nil { - m := metaData{Devices: make(map[string]*devInfo)} - - if err := json.Unmarshal(jsonData, &m); err != nil { - return err - } - - for hash, info := range m.Devices { - info.Hash = hash - devices.saveMetadata(info) - } - if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { - return err - } - - } - - return nil -} - -// Cleanup deleted devices. It assumes that all the devices have been -// loaded in the hash table. -func (devices *DeviceSet) cleanupDeletedDevices() error { - devices.Lock() - - // If there are no deleted devices, there is nothing to do. 
- if devices.nrDeletedDevices == 0 { - devices.Unlock() - return nil - } - - var deletedDevices []*devInfo - - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) - deletedDevices = append(deletedDevices, info) - } - - // Delete the deleted devices. DeleteDevice() first takes the info lock - // and then devices.Lock(). So drop it to avoid deadlock. - devices.Unlock() - - for _, info := range deletedDevices { - // This will again try deferred deletion. - if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) - } - } - - return nil -} - -func (devices *DeviceSet) countDeletedDevices() { - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - devices.nrDeletedDevices++ - } -} - -func (devices *DeviceSet) startDeviceDeletionWorker() { - // Deferred deletion is not enabled. Don't do anything. - if !devices.deferredDelete { - return - } - - // Cleanup right away if there are any leaked devices. Note this - // could cause some slowdown for process startup, if there were - // Leaked devices - devices.cleanupDeletedDevices() - logrus.Debug("devmapper: Worker to cleanup deleted devices started") - for range devices.deletionWorkerTicker.C { - devices.cleanupDeletedDevices() - } -} - -func (devices *DeviceSet) initMetaData() error { - devices.Lock() - defer devices.Unlock() - - if err := devices.migrateOldMetaData(); err != nil { - return err - } - - _, transactionID, _, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - - devices.TransactionID = transactionID - - if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%w", err) - } - - devices.constructDeviceIDMap() - devices.countDeletedDevices() - - if err := devices.processPendingTransaction(); err != nil { - return err - } - - // Start a goroutine to cleanup Deleted Devices - go devices.startDeviceDeletionWorker() - return nil -} - -func (devices *DeviceSet) incNextDeviceID() { - // IDs are 24bit, so wrap around - devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID -} - -func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { - devices.incNextDeviceID() - for i := 0; i <= maxDeviceID; i++ { - if devices.isDeviceIDFree(devices.NextDeviceID) { - devices.markDeviceIDUsed(devices.NextDeviceID) - return devices.NextDeviceID, nil - } - devices.incNextDeviceID() - } - - return 0, fmt.Errorf("devmapper: Unable to find a free device ID") -} - -func (devices *DeviceSet) poolHasFreeSpace() error { - if devices.minFreeSpacePercent == 0 { - return nil - } - - _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err != nil { - return err - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeData < 1 { - minFreeData = 1 - } - dataFree := dataTotal - dataUsed - if dataFree < minFreeData { - return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. 
Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) - } - - minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeMetadata < 1 { - minFreeMetadata = 1 - } - - metadataFree := metadataTotal - metadataUsed - if metadataFree < minFreeMetadata { - return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) - } - - return nil -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. - logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating device: %s", err) - devices.markDeviceIDFree(deviceID) - return nil, err - } - break - } - - logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) - info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) - if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - return info, nil -} - -func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { - var ( - devinfo *devicemapper.Info - err error - ) - - if err = devices.poolHasFreeSpace(); err != nil { - return err - } - - if devices.deferredRemove { - devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) - if err != nil { - return err - } - if devinfo != nil && devinfo.DeferredRemove != 0 { - err = devices.cancelDeferredRemoval(baseInfo) - if err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. 
- if !errors.Is(err, devicemapper.ErrEnxio) { - return err - } - devinfo = nil - } else { - defer devices.deactivateDevice(baseInfo) - } - } - } else { - devinfo, err = devicemapper.GetInfo(baseInfo.Name()) - if err != nil { - return err - } - } - - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { - return err - } - defer devicemapper.ResumeDevice(baseInfo.Name()) - } - - if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { - return err - } - - return nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - - for { - if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. - logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating snap device: %s", err) - devices.markDeviceIDFree(deviceID) - return err - } - break - } - - if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - logrus.Debugf("devmapper: Error registering device: %s", err) - return err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - return nil -} - -func (devices *DeviceSet) loadMetadata(hash string) *devInfo { - info := &devInfo{Hash: hash, devices: devices} - - jsonData, err := os.ReadFile(devices.metadataFile(info)) - if err != nil { - logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if err := json.Unmarshal(jsonData, &info); err != nil { - logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if info.DeviceID > maxDeviceID { - logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) - return nil - } - - return info -} - -func getDeviceUUID(device string) (string, error) { - out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() - if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err) - } - - uuid := strings.TrimSuffix(string(out), "\n") - uuid = strings.TrimSpace(uuid) - logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) - return uuid, nil -} - -func (devices *DeviceSet) getBaseDeviceSize() uint64 { - info, _ := devices.lookupDevice("") - if info == nil { - return 0 - } - return info.Size -} - -func (devices *DeviceSet) getBaseDeviceFS() string { - return devices.BaseDeviceFilesystem 
-} - -func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - if devices.BaseDeviceUUID != uuid { - return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) - } - - if devices.BaseDeviceFilesystem == "" { - fsType, err := ProbeFsType(baseInfo.DevName()) - if err != nil { - return err - } - if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { - return err - } - } - - // If user specified a filesystem using dm.fs option and current - // file system of base image is not same, warn user that dm.fs - // will be ignored. - if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) - devices.filesystem = devices.BaseDeviceFilesystem - } - return nil -} - -func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { - devices.BaseDeviceFilesystem = fs - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - devices.BaseDeviceUUID = uuid - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) createBaseImage() error { - logrus.Debug("devmapper: Initializing base device-mapper thin volume") - - // Create initial device - info, err := devices.createRegisterDevice("") - if err != nil { - return err - } - - logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return err - } - - if err := devices.createFilesystem(info); err != nil { - return err - } - - info.Initialized = true - if err := devices.saveMetadata(info); err != nil { - info.Initialized = false - return err - } - - if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) - } - - return nil -} - -// Returns if thin pool device exists or not. If device exists, also makes -// sure it is a thin pool device and not some other type of device. -func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { - logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) - - info, err := devicemapper.GetInfo(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err) - } - - // Device does not exist. 
- if info.Exists == 0 { - return false, nil - } - - _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err) - } - - if deviceType != "thin-pool" { - return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) - } - - return true, nil -} - -func (devices *DeviceSet) checkThinPool() error { - _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - if dataUsed != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", - devices.thinPoolDevice) - } - if transactionID != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", - devices.thinPoolDevice) - } - return nil -} - -// Base image is initialized properly. Either save UUID for first time (for -// upgrade case or verify UUID. -func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { - // If BaseDeviceUUID is nil (upgrade case), save it and return success. - if devices.BaseDeviceUUID == "" { - if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) - } - return nil - } - - if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err) - } - - return nil -} - -func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - if !userBaseSize { - return nil - } - - if devices.baseFsSize < devices.getBaseDeviceSize() { - return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) - } - - if devices.baseFsSize == devices.getBaseDeviceSize() { - return nil - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - info.Size = devices.baseFsSize - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, info.Hash) - return err - } - - return devices.growFS(info) -} - -func (devices *DeviceSet) growFS(info *devInfo) error { - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("activating devmapper device: %s", err) - } - - defer devices.deactivateDevice(info) - - fsMountPoint := "/run/containers/storage/mnt" - if err := fileutils.Exists(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0o700); err != nil { - return err - } - defer os.RemoveAll(fsMountPoint) - } - - options := "" - if devices.BaseDeviceFilesystem == xfs { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - options = joinMountOptions(options, devices.mountOptions) - - if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) - } - - defer func() { - if err := mount.Unmount(fsMountPoint); err != nil { - logrus.Warnf("devmapper.growFS cleanup error: %v", err) - } - }() - - switch devices.BaseDeviceFilesystem { - case ext4: - if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) - } - case xfs: - if out, err := exec.Command("xfs_growfs", 
info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) - } - default: - return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem) - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo, _ := devices.lookupDeviceWithLock("") - - // base image already exists. If it is initialized properly, do UUID - // verification and return. Otherwise remove image and set it up - // fresh. - - if oldInfo != nil { - if oldInfo.Initialized && !oldInfo.Deleted { - if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { - return err - } - - if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { - return err - } - - return nil - } - - logrus.Debug("devmapper: Removing uninitialized base image") - // If previous base device is in deferred delete state, - // that needs to be cleaned up first. So don't try - // deferred deletion. - if err := devices.DeleteDevice("", true); err != nil { - return err - } - } - - // If we are setting up base image for the first time, make sure - // thin pool is empty. - if devices.thinPoolDevice != "" && oldInfo == nil { - if err := devices.checkThinPool(); err != nil { - return err - } - } - - // Create new base image device - if err := devices.createBaseImage(); err != nil { - return err - } - - return nil -} - -func setCloseOnExec(name string) { - fileEntries, _ := os.ReadDir("/proc/self/fd") - for _, e := range fileEntries { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name())) - if link == name { - fd, err := strconv.Atoi(e.Name()) - if err == nil { - unix.CloseOnExec(fd) - } - } - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// ResizePool increases the size of the pool. 
-func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - datafilename := path.Join(dirname, "data") - if len(devices.dataDevice) > 0 { - datafilename = devices.dataDevice - } - metadatafilename := path.Join(dirname, "metadata") - if len(devices.metadataDevice) > 0 { - metadatafilename = devices.metadataDevice - } - - datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("devmapper: Can't shrink file") - } - - dataloopback := loopback.FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := loopback.FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { - return fmt.Errorf("devmapper: Unable to reload pool: %s", err) - } - - // Resume the pool - if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := os.ReadFile(devices.transactionMetaFile()) - if err != nil { - // There is no active transaction. This will be the case - // during upgrade. - if os.IsNotExist(err) { - devices.OpenTransactionID = devices.TransactionID - return nil - } - return err - } - - json.Unmarshal(jsonData, &devices.transaction) - return nil -} - -func (devices *DeviceSet) saveTransactionMetaData() error { - jsonData, err := json.Marshal(&devices.transaction) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) -} - -func (devices *DeviceSet) removeTransactionMetaData() error { - return os.RemoveAll(devices.transactionMetaFile()) -} - -func (devices *DeviceSet) rollbackTransaction() error { - logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) - - // A device id might have already been deleted before transaction - // closed. In that case this call will fail. Just leave a message - // in case of failure. 
- if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logrus.Errorf("devmapper: Unable to delete device: %s", err) - } - - dinfo := &devInfo{Hash: devices.DeviceIDHash} - if err := devices.removeMetadata(dinfo); err != nil { - logrus.Errorf("devmapper: Unable to remove metadata: %s", err) - } else { - devices.markDeviceIDFree(devices.DeviceID) - } - - if err := devices.removeTransactionMetaData(); err != nil { - logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) - } - - return nil -} - -func (devices *DeviceSet) processPendingTransaction() error { - if err := devices.loadTransactionMetaData(); err != nil { - return err - } - - // If there was open transaction but pool transaction ID is same - // as open transaction ID, nothing to roll back. - if devices.TransactionID == devices.OpenTransactionID { - return nil - } - - // If open transaction ID is less than pool transaction ID, something - // is wrong. Bail out. - if devices.OpenTransactionID < devices.TransactionID { - logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) - return nil - } - - // Pool transaction ID is not same as open transaction. There is - // a transaction which was not completed. - if err := devices.rollbackTransaction(); err != nil { - return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) - } - - devices.OpenTransactionID = devices.TransactionID - return nil -} - -func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := os.ReadFile(devices.deviceSetMetaFile()) - if err != nil { - // For backward compatibility return success if file does - // not exist. 
- if os.IsNotExist(err) { - return nil - } - return err - } - - return json.Unmarshal(jsonData, devices) -} - -func (devices *DeviceSet) saveDeviceSetMetaData() error { - jsonData, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) -} - -func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { - devices.allocateTransactionID() - devices.DeviceIDHash = hash - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) refreshTransaction(DeviceID int) error { - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) closeTransaction() error { - if err := devices.updatePoolTransactionID(); err != nil { - logrus.Debug("devmapper: Failed to close Transaction") - return err - } - return nil -} - -func determineDriverCapabilities(version string) error { - // Kernel driver version >= 4.27.0 support deferred removal - - logrus.Debugf("devicemapper: kernel dm driver version is %s", version) - - versionSplit := strings.Split(version, ".") - major, err := strconv.Atoi(versionSplit[0]) - if err != nil { - return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported) - } - - if major > 4 { - driverDeferredRemovalSupport = true - return nil - } - - if major < 4 { - return nil - } - - minor, err := strconv.Atoi(versionSplit[1]) - if err != nil { - return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported) - } - - /* - * If major is 4 and minor is 27, then there is no need to - * check for patch level as it can not be less than 0. - */ - if minor >= 27 { - driverDeferredRemovalSupport = true - return nil - } - - return nil -} - -// Determine the major and minor number of loopback device -func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - var stat unix.Stat_t - err := unix.Stat(file.Name(), &stat) - if err != nil { - return 0, 0, err - } - - dev := stat.Rdev - majorNum := major(uint64(dev)) - minorNum := minor(uint64(dev)) - - logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) - return majorNum, minorNum, nil -} - -// Given a file which is backing file of a loop back device, find the -// loopback device name and its major/minor number. 
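Condensed, the capability gate in determineDriverCapabilities above reduces to a single predicate. A hedged standalone sketch (the function name is invented; the logic mirrors the version checks just shown):

```go
// Sketch: deferred removal needs kernel dm driver version >= 4.27.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func supportsDeferredRemoval(version string) bool {
	parts := strings.SplitN(version, ".", 3)
	if len(parts) < 2 {
		return false
	}
	major, errMaj := strconv.Atoi(parts[0])
	minor, errMin := strconv.Atoi(parts[1])
	if errMaj != nil || errMin != nil {
		return false
	}
	// The patch level never matters: 4.27.x supports it for any x.
	return major > 4 || (major == 4 && minor >= 27)
}

func main() {
	fmt.Println(supportsDeferredRemoval("4.37.0")) // true
}
```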
-func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { - file, err := os.Open(filename) - if err != nil { - logrus.Debugf("devmapper: Failed to open file %s", filename) - return "", 0, 0, err - } - - defer file.Close() - loopbackDevice := loopback.FindLoopDeviceFor(file) - if loopbackDevice == nil { - return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) - } - defer loopbackDevice.Close() - - Major, Minor, err := getDeviceMajorMinor(loopbackDevice) - if err != nil { - return "", 0, 0, err - } - return loopbackDevice.Name(), Major, Minor, nil -} - -// Get the major/minor numbers of thin pool data and metadata devices -func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { - var params, poolDataMajMin, poolMetadataMajMin string - - _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) - if err != nil { - return 0, 0, 0, 0, err - } - - if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { - return 0, 0, 0, 0, err - } - - logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) - - poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") - poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") - poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil -} - -func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { - poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() - if err != nil { - return err - } - - dirname := devices.loopbackDir() - - // data device has not been passed in. So there should be a data file - // which is being mounted as loop device. - if devices.dataDevice == "" { - datafilename := path.Join(dirname, "data") - dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) - if err != nil { - return err - } - - // Compare the two - if poolDataMajor == dataMajor && poolDataMinor == dataMinor { - devices.dataDevice = dataLoopDevice - devices.dataLoopFile = datafilename - } - - } - - // metadata device has not been passed in. So there should be a - // metadata file which is being mounted as loop device. - if devices.metadataDevice == "" { - metadatafilename := path.Join(dirname, "metadata") - metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) - if err != nil { - return err - } - if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { - devices.metadataDevice = metadataLoopDevice - devices.metadataLoopFile = metadatafilename - } - } - - return nil -} - -func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - // If user asked for deferred removal then check both libdm library - // and kernel driver support deferred removal otherwise error out. 
- if enableDeferredRemoval { - if !driverDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") - } - if !devicemapper.LibraryDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") - } - logrus.Debug("devmapper: Deferred removal support enabled.") - devices.deferredRemove = true - } - - if enableDeferredDeletion { - if !devices.deferredRemove { - return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") - } - logrus.Debug("devmapper: Deferred deletion support enabled.") - devices.deferredDelete = true - } - return nil -} - -func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { - if err := devices.enableDeferredRemovalDeletion(); err != nil { - return err - } - - // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") - - if !devices.overrideUdevSyncCheck { - return graphdriver.ErrNotSupported - } - } - - // create the root dir of the devmapper driver ownership to match this - // daemon's remapped root uid/gid so containers can start properly - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil { - return err - } - if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil { - return err - } - if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { - return err - } - - prevSetupConfig, err := readLVMConfig(devices.root) - if err != nil { - return err - } - - if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { - if devices.thinPoolDevice != "" { - return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") - } - - if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { - if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { - return errors.New("changing direct-lvm config is not supported") - } - logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") - if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { - return err - } - if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { - return err - } - if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { - return err - } - } - devices.thinPoolDevice = "storage-thinpool" - logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) - } - - // Set the device prefix from the device id and inode of the storage root dir - var st unix.Stat_t - if err := unix.Stat(devices.root, &st); err != nil { - return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) - } - // "reg-" stands for "regular file". - // In the future we might use "dev-" for "device file", etc. 
- // container-maj,min[-inode] stands for: - // - Managed by container storage - // - The target of this device is at major and minor - // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino) - logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the thin-pool device - poolExists, err := devices.thinPoolExists(devices.getPoolName()) - if err != nil { - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, - // so we add this badhack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - createdLoopback := false - - // If the pool doesn't exist, create it - if !poolExists && devices.thinPoolDevice == "" { - logrus.Debug("devmapper: Pool doesn't exist. Creating it.") - - var ( - dataFile *os.File - metadataFile *os.File - ) - - fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) - if err != nil { - return err - } - switch fsMagic { - case graphdriver.FsMagicAufs: - return fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") - } - - if devices.dataDevice == "" { - // Make sure the sparse images exist in /devicemapper/data - - hasData := devices.hasImage("data") - - if !doInit && !hasData { - return errors.New("loopback data file not found") - } - - if !hasData { - createdLoopback = true - } - - data, err := devices.ensureImage("data", devices.dataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) - return err - } - - dataFile, err = loopback.AttachLoopDevice(data) - if err != nil { - return err - } - devices.dataLoopFile = data - devices.dataDevice = dataFile.Name() - } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600) - if err != nil { - return err - } - } - defer dataFile.Close() - - if devices.metadataDevice == "" { - // Make sure the sparse images exist in /devicemapper/metadata - - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasMetadata { - return errors.New("loopback metadata file not found") - } - - if !hasMetadata { - createdLoopback = true - } - - metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) - return err - } - - metadataFile, err = loopback.AttachLoopDevice(metadata) - if err != nil { - return err - } - devices.metadataLoopFile = metadata - devices.metadataDevice = metadataFile.Name() - } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600) - if err != nil { - return err - } - } - defer metadataFile.Close() - - if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { - return err - } - defer func() { - if retErr != nil { - err = devices.deactivatePool() - if err != nil { - logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) - } - } - }() - } - - // Pool already exists and caller did not pass us a pool. That means - // we probably created pool earlier and could not remove it as some - // containers were still using it. Detect some of the properties of - // pool, like is it using loop devices. 
- if poolExists && devices.thinPoolDevice == "" { - if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the transaction id and migrate old metadata - if !createdLoopback { - if err := devices.initMetaData(); err != nil { - return err - } - } - - if devices.thinPoolDevice == "" { - if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") - } - } - - // Right now this loads only NextDeviceID. If there is more metadata - // down the line, we might have to move it earlier. - if err := devices.loadDeviceSetMetaData(); err != nil { - return err - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) - return err - } - } - - return nil -} - -// AddDevice adds a device and registers in the hash. -func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) - - // If a deleted device exists, return error. - baseInfo, err := devices.lookupDeviceWithLock(baseHash) - if err != nil { - return err - } - - if baseInfo.Deleted { - return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // Also include deleted devices in case hash of new device is - // same as one of the deleted devices. - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) - } - - size, err := devices.parseStorageOpt(storageOpt) - if err != nil { - return err - } - - if size == 0 { - size = baseInfo.Size - } - - if size < baseInfo.Size { - return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) - } - - if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { - return err - } - - // Grow the container rootfs. - if size > baseInfo.Size { - info, err := devices.lookupDevice(hash) - if err != nil { - return err - } - - if err := devices.growFS(info); err != nil { - return err - } - } - - return nil -} - -func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - // Read size to change the block device size per container. - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return 0, err - } - return uint64(size), nil - default: - return 0, fmt.Errorf("unknown option %s", key) - } - } - - return 0, nil -} - -func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { - // If device is already in deleted state, there is nothing to be done. - if info.Deleted { - return nil - } - - logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) - - info.Deleted = true - - // save device metadata to reflect deleted state. 
- if err := devices.saveMetadata(info); err != nil { - info.Deleted = false - return err - } - - devices.nrDeletedDevices++ - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) error { - err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) - if err != nil { - // If syncDelete is true, we want to return an error. If deferred - // deletion is not enabled, we return an error. If the error is - // something other than EBUSY, return an error. - if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) { - logrus.Debugf("devmapper: Error deleting device: %s", err) - return err - } - } - - if err == nil { - if err := devices.unregisterDevice(info.Hash); err != nil { - return err - } - // If the device was already in deferred delete state, deletion - // was being retried later. Reduce the deleted device count. - if info.Deleted { - devices.nrDeletedDevices-- - } - devices.markDeviceIDFree(info.DeviceID) - } else { - if err := devices.markForDeferredDeletion(info); err != nil { - return err - } - } - - return nil -} - -// Issue discard only if the device open count is zero. -func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) - // This is a workaround for the kernel not discarding blocks on the - // thin pool when we remove a thinp device, so we do it manually. - // Even if the device is deferred deleted, activate it and issue - // discards. - if err := devices.activateDeviceIfNeeded(info, true); err != nil { - return err - } - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.OpenCount != 0 { - logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) - return nil - } - - if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { - logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) - } - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { - if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID) - return err - } - - defer devices.closeTransaction() - - if devices.doBlkDiscard { - devices.issueDiscard(info) - } - - // Try to deactivate the device in case it is active. - // If deferred removal is enabled and deferred deletion is disabled, - // make sure the device is removed synchronously. There have been - // cases of the device being busy for a short duration, and we would - // rather busy-wait for device removal to take care of those cases. - deferredRemove := devices.deferredRemove - if !devices.deferredDelete { - deferredRemove = false - } - - if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { - logrus.Debugf("devmapper: Error deactivating device: %s", err) - return err - } - - if err := devices.deleteDeviceNoLock(info, syncDelete); err != nil { - return err - } - - return nil -}
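The error handling in deleteDeviceNoLock above packs three outcomes into two branches. A hedged condensation of that decision, as a standalone sketch with an invented errBusy standing in for devicemapper.ErrBusy:

```go
// Sketch only, not driver code: the outcome deleteDeviceNoLock picks
// for a given combination of flags and delete error.
package main

import (
	"errors"
	"fmt"
)

var errBusy = errors.New("device busy") // stand-in for devicemapper.ErrBusy

func deleteOutcome(syncDelete, deferredDelete bool, err error) string {
	switch {
	case err == nil:
		return "unregister metadata and free the device ID"
	case syncDelete || !deferredDelete || !errors.Is(err, errBusy):
		return "return the error to the caller"
	default:
		return "mark the device Deleted and let the deletion worker retry"
	}
}

func main() {
	fmt.Println(deleteOutcome(false, true, errBusy)) // deferred-deletion path
}
```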
- -// DeleteDevice will return success if the device has been marked for deferred -// removal. If one wants to override that and have DeleteDevice() fail if the -// device was busy and could not be deleted, set syncDelete=true. -func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - return devices.deleteDevice(info, syncDelete) -} - -func (devices *DeviceSet) deactivatePool() error { - logrus.Debug("devmapper: deactivatePool() START") - defer logrus.Debug("devmapper: deactivatePool() END") - devname := devices.getPoolDevName() - - devinfo, err := devicemapper.GetInfo(devname) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - if err := devicemapper.RemoveDevice(devname); err != nil { - return err - } - - if d, err := devicemapper.GetDeps(devname); err == nil { - logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) - } - - return nil -} - -func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - return devices.deactivateDeviceMode(info, devices.deferredRemove) -} - -func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { - var err error - logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) - defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - - if deferredRemove { - err = devicemapper.RemoveDeviceDeferred(info.Name()) - } else { - err = devices.removeDevice(info.Name()) - } - - // This function's semantics are such that it does not return an - // error if the device does not exist. So if the device went away by - // the time we actually tried to remove it, do not return an error. - if !errors.Is(err, devicemapper.ErrEnxio) { - return err - } - return nil -} - -// Issues the underlying dm remove operation. -func (devices *DeviceSet) removeDevice(devname string) error { - var err error - - logrus.Debugf("devmapper: removeDevice START(%s)", devname) - defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) - - for i := 0; i < 200; i++ { - err = devicemapper.RemoveDevice(devname) - if err == nil { - break - } - if !errors.Is(err, devicemapper.ErrBusy) { - return err - } - - // If we see EBUSY it may be a transient error, so - // sleep a bit and retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - } - - return err -}
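The loop in removeDevice above is a small pattern worth isolating: treat EBUSY as transient, and drop the device-set lock while sleeping so other goroutines can make progress. A hedged standalone sketch (all names invented; the caller is assumed to hold mu):

```go
// Sketch of the EBUSY retry pattern used by removeDevice; errBusy and
// retryBusy are illustrative names, not the driver's API.
package busyretry

import (
	"errors"
	"sync"
	"time"
)

var errBusy = errors.New("EBUSY")

// retryBusy runs op up to 200 times, sleeping 100ms between attempts
// while the error is EBUSY. mu must be held by the caller; it is
// released during the sleep so other goroutines are not blocked.
func retryBusy(mu *sync.Mutex, op func() error) error {
	var err error
	for i := 0; i < 200; i++ {
		if err = op(); err == nil || !errors.Is(err, errBusy) {
			return err
		}
		mu.Unlock()
		time.Sleep(100 * time.Millisecond)
		mu.Lock()
	}
	return err
}
```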
- -func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { - if !devices.deferredRemove { - return nil - } - - logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) - - devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) - if err != nil { - return err - } - - if devinfo != nil && devinfo.DeferredRemove == 0 { - return nil - } - - // Cancel deferred remove - if err := devices.cancelDeferredRemoval(info); err != nil { - // If the error is ErrEnxio, the device is probably already gone. Continue. - if !errors.Is(err, devicemapper.ErrEnxio) { - return err - } - } - return nil -} - -func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { - logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) - - var err error - - // Cancel deferred remove - for i := 0; i < 100; i++ { - err = devicemapper.CancelDeferredRemove(info.Name()) - if err != nil { - if errors.Is(err, devicemapper.ErrBusy) { - // If we see EBUSY it may be a transient error, so - // sleep a bit and retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - continue - } - } - break - } - return err -} - -func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { - files, err := os.ReadDir(dir) - if err != nil { - logrus.Warnf("devmapper: unmountAndDeactivate: %s", err) - return - } - - for _, d := range files { - if !d.IsDir() { - continue - } - - name := d.Name() - fullname := path.Join(dir, name) - - // We use MNT_DETACH here in case it is still busy in some running - // container. This means it'll go away from the global scope directly, - // and the device will be released when that container dies. - if err := mount.Unmount(fullname); err != nil { - logrus.Warnf("devmapper.Shutdown error: %s", err) - } - - if devInfo, err := devices.lookupDevice(name); err != nil { - logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err) - } else { - if err := devices.deactivateDevice(devInfo); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err) - } - } - } -} - -// Shutdown shuts down the device by unmounting the root. -func (devices *DeviceSet) Shutdown(home string) error { - logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) - logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) - defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) - - // Stop the deletion worker. This stops delivery of new events to the - // ticker channel, so no new instance of cleanupDeletedDevice() - // will run after this call. If one instance is already running at - // the time of the call, it must be holding devices.Lock() and - // we will block on this lock till the cleanup function exits. - devices.deletionWorkerTicker.Stop() - - devices.Lock() - // Save DeviceSet metadata first. Docker kills all threads if they - // don't finish in a certain time. It is possible that the Shutdown() - // routine does not finish in time as we loop trying to deactivate - // some devices while they are busy. In that case the shutdown() routine - // will be killed and we will not get a chance to save the deviceset - // metadata. Hence save this early, before trying to deactivate devices. - devices.saveDeviceSetMetaData() - devices.unmountAndDeactivateAll(path.Join(home, "mnt")) - devices.Unlock() - - info, _ := devices.lookupDeviceWithLock("") - if info != nil { - info.lock.Lock() - devices.Lock() - if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err) - } - devices.Unlock() - info.lock.Unlock() - } - - devices.Lock() - if devices.thinPoolDevice == "" { - if err := devices.deactivatePool(); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err) - } - } - devices.Unlock() - - return nil -} - -// Recent XFS changes allow changing the behavior of the filesystem in case of errors.
-// When the thin pool gets full and XFS gets an ENOSPC error, it currently tries -// IO infinitely, which can sometimes block the container process so that the -// process can't be killed. With a 0 value, XFS will not retry upon error and -// will instead shut down the filesystem. - -func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { - dmDevicePath, err := os.Readlink(info.DevName()) - if err != nil { - return fmt.Errorf("devmapper: readlink failed for device %v: %w", info.DevName(), err) - } - - dmDeviceName := path.Base(dmDevicePath) - filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" - maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) - if err != nil { - return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system: %w", err) - } - defer maxRetriesFile.Close() - - // Set max retries to 0 - _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) - if err != nil { - return fmt.Errorf("devmapper: Failed to write string %v to file %v: %w", devices.xfsNospaceRetries, filePath, err) - } - return nil -} - -// MountDevice mounts the device if not already mounted. -func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - if info.Deleted { - return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - fstype, err := ProbeFsType(info.DevName()) - if err != nil { - return err - } - - options := "" - - if fstype == xfs { - // XFS needs nouuid or it can't mount filesystems with the same UUID - options = joinMountOptions(options, "nouuid") - } - - mountOptions := devices.mountOptions - if len(moptions.Options) > 0 { - addNouuid := strings.Contains(mountOptions, "nouuid") - mountOptions = strings.Join(moptions.Options, ",") - if addNouuid { - mountOptions = fmt.Sprintf("nouuid,%s", mountOptions) - } - } - - options = joinMountOptions(options, mountOptions) - options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel)) - - if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) - } - - if fstype == xfs && devices.xfsNospaceRetries != "" { - if err := devices.xfsSetNospaceRetries(info); err != nil { - if err := mount.Unmount(path); err != nil { - logrus.Warnf("devmapper.MountDevice cleanup error: %v", err) - } - devices.deactivateDevice(info) - return err - } - } - - return nil -}
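In isolation, the sysfs write that xfsSetNospaceRetries performs amounts to the following hedged sketch (the dm-3 device name and hard-coded "0" are illustrative; the real code derives the name from the device symlink and writes the user-supplied retry value):

```go
// Sketch: the sysfs knob behind dm.xfs_nospace_max_retries. Writing
// "0" makes XFS shut the filesystem down on ENOSPC instead of
// retrying metadata IO forever.
package main

import "os"

func main() {
	// dm-3 is a made-up device name for illustration.
	knob := "/sys/fs/xfs/dm-3/error/metadata/ENOSPC/max_retries"
	if err := os.WriteFile(knob, []byte("0"), 0o200); err != nil {
		panic(err) // absent on kernels without the XFS error-config sysfs tree
	}
}
```

- -// UnmountDevice unmounts the device and removes it from hash.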
-func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) - - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := mount.Unmount(mountPath); err != nil { - if ok, _ := Mounted(mountPath); ok { - return err - } - } - logrus.Debug("devmapper: Unmount done") - - // Remove the mountpoint here. Removing the mountpoint (in newer kernels) - // will cause all other instances of this mount in other mount namespaces - // to be killed (this is an anti-DoS measure that is necessary for things - // like devicemapper). This is necessary to avoid cases where a libdm mount - // that is present in another namespace will cause subsequent RemoveDevice - // operations to fail. We ignore any errors here because this may fail on - // older kernels which don't have - // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. - if err := os.Remove(mountPath); err != nil { - logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err) - } - - return devices.deactivateDevice(info) -} - -// HasDevice returns true if the device metadata exists. -func (devices *DeviceSet) HasDevice(hash string) bool { - info, _ := devices.lookupDeviceWithLock(hash) - return info != nil -} - -// List returns a list of device ids. -func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -// ListLayers returns a list of device IDs, omitting the ""/"base" device and -// any which have been marked as deleted. 
-func (devices *DeviceSet) ListLayers() ([]string, error) { - if err := devices.cleanupDeletedDevices(); err != nil { - return nil, err - } - - devices.Lock() - defer devices.Unlock() - - ids := make([]string, 0, len(devices.Devices)) - for k, d := range devices.Devices { - if k == "" || d.Deleted { - continue - } - ids = append(ids, k) - } - return ids, nil -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) - if err != nil { - logrus.Debugf("could not find devicemapper status: %v", err) - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err != nil { - logrus.Debugf("could not find scanf devicemapper status: %v", err) - } - return -} - -// GetDeviceStatus provides size, mapped sectors -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - status := &DevStatus{ - DeviceID: info.DeviceID, - Size: info.Size, - TransactionID: info.TransactionID, - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - if err != nil { - return nil, err - } - - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -// DataDevicePath returns the path to the data storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) DataDevicePath() string { - return devices.dataDevice -} - -// MetadataDevicePath returns the path to the metadata storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) MetadataDevicePath() string { - return devices.metadataDevice -} - -func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(unix.Statfs_t) - if err := unix.Statfs(loopFile, buf); err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) - return 0, err - } - return buf.Bfree * uint64(buf.Bsize), nil -} - -func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { - if loopFile != "" { - fi, err := os.Stat(loopFile) - if err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) - return false, err - } - return fi.Mode().IsRegular(), nil - } - return false, nil -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataFile = devices.DataDevicePath() - status.DataLoopback = devices.dataLoopFile - status.MetadataFile = 
devices.MetadataDevicePath() - status.MetadataLoopback = devices.metadataLoopFile - status.UdevSyncSupported = devicemapper.UdevSyncSupported() - status.DeferredRemoveEnabled = devices.deferredRemove - status.DeferredDeleteEnabled = devices.deferredDelete - status.DeferredDeletedDeviceCount = devices.nrDeletedDevices - status.BaseDeviceSize = devices.getBaseDeviceSize() - status.BaseDeviceFS = devices.getBaseDeviceFS() - - totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err == nil { - // Convert from blocks to bytes - blockSizeInSectors := totalSizeInSectors / dataTotal - - status.Data.Used = dataUsed * blockSizeInSectors * 512 - status.Data.Total = dataTotal * blockSizeInSectors * 512 - status.Data.Available = status.Data.Total - status.Data.Used - - // metadata blocks are always 4k - status.Metadata.Used = metadataUsed * 4096 - status.Metadata.Total = metadataTotal * 4096 - status.Metadata.Available = status.Metadata.Total - status.Metadata.Used - - status.SectorSize = blockSizeInSectors * 512 - - if check, _ := devices.isRealFile(devices.dataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) - if err == nil && actualSpace < status.Data.Available { - status.Data.Available = actualSpace - } - } - - if check, _ := devices.isRealFile(devices.metadataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) - if err == nil && actualSpace < status.Metadata.Available { - status.Metadata.Available = actualSpace - } - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 - } - - return status -} - -// exportDeviceMetadata returns the device ID, size, and name for the device -// with the given hash. -func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} - return metadata, nil -}
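To make the block-to-byte conversion in Status() concrete with invented numbers: if GetStatus reports totalSizeInSectors = 2,097,152 and dataTotal = 16,384 blocks, then blockSizeInSectors = 2,097,152 / 16,384 = 128 sectors, i.e. 64 KiB thin-pool blocks (the driver's default thinp block size). A dataUsed of 1,024 blocks therefore converts to 1,024 * 128 * 512 = 67,108,864 bytes (64 MiB) of data in use, while metadata usage is simply the reported 4 KiB block count times 4096.

- -// NewDeviceSet creates the device set based on the options provided.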
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { - devicemapper.SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - metaData: metaData{Devices: make(map[string]*devInfo)}, - dataLoopbackSize: defaultDataLoopbackSize, - metaDataLoopbackSize: defaultMetaDataLoopbackSize, - baseFsSize: defaultBaseFsSize, - overrideUdevSyncCheck: defaultUdevSyncOverride, - doBlkDiscard: true, - thinpBlockSize: defaultThinpBlockSize, - deviceIDMap: make([]byte, deviceIDMapSz), - deletionWorkerTicker: time.NewTicker(time.Second * 30), - uidMaps: uidMaps, - gidMaps: gidMaps, - minFreeSpacePercent: defaultMinFreeSpacePercent, - } - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return nil, graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return nil, graphdriver.ErrNotSupported - } - - if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { - // enable deferred stuff by default - enableDeferredDeletion = true - enableDeferredRemoval = true - } - - foundBlkDiscard := false - var lvmSetupConfig directLVMConfig - testMode := false - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "dm.basesize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - userBaseSize = true - devices.baseFsSize = uint64(size) - case "dm.loopdatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.dataLoopbackSize = size - case "dm.loopmetadatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.metaDataLoopbackSize = size - case "dm.fs": - if val != ext4 && val != xfs { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) - } - devices.filesystem = val - case "dm.mkfsarg": - devices.mkfsArgs = append(devices.mkfsArgs, val) - case "dm.mountopt", "devicemapper.mountopt": - devices.mountOptions = joinMountOptions(devices.mountOptions, val) - case "dm.metadatadev": - devices.metadataDevice = val - case "dm.metadata_size": - devices.metaDataSize = val - case "dm.datadev": - devices.dataDevice = val - case "dm.thinpooldev": - devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") - case "dm.blkdiscard": - foundBlkDiscard = true - devices.doBlkDiscard, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.blocksize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - // convert to 512b sectors - devices.thinpBlockSize = uint32(size) >> 9 - case "dm.override_udev_sync_check": - devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_removal": - enableDeferredRemoval, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_deletion": - enableDeferredDeletion, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.metaDataSize": - lvmSetupConfig.MetaDataSize = val - case "dm.min_free_space": - if !strings.HasSuffix(val, "%") { - return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") - } - - valstring := strings.TrimSuffix(val, "%") - minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) - if err != nil { - return nil, err - } - - if 
minFreeSpacePercent >= 100 { - return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) - } - - devices.minFreeSpacePercent = uint32(minFreeSpacePercent) - case "dm.xfs_nospace_max_retries": - _, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return nil, err - } - devices.xfsNospaceRetries = val - case "dm.directlvm_device": - lvmSetupConfig.Device = val - case "dm.directlvm_device_force": - lvmSetupConfigForce, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.thinp_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err) - } - if per >= 100 { - return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpPercent = per - case "dm.thinp_metapercent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err) - } - if per >= 100 { - return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpMetaPercent = per - case "dm.thinp_autoextend_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendPercent = per - case "dm.thinp_autoextend_threshold": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendThreshold = per - case "dm.libdm_log_level": - level, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err) - } - if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) - } - // Register a new logging callback with the specified level. 
- devicemapper.LogInit(devicemapper.DefaultLogger{ - Level: int(level), - }) - case "test": - testMode, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("devmapper: Unknown option %s", key) - } - } - - if !testMode { - if err := validateLVMConfig(lvmSetupConfig); err != nil { - return nil, err - } - } - - devices.lvmSetupConfig = lvmSetupConfig - - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { - devices.doBlkDiscard = false - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go deleted file mode 100644 index f85fb94790..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go +++ /dev/null @@ -1,109 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognized ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. 
-// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go deleted file mode 100644 index b188530318..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ /dev/null @@ -1,272 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "fmt" - "os" - "path" - "strconv" - - graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/directory" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" - "github.com/containers/storage/pkg/mount" - units "github.com/docker/go-units" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const defaultPerms = os.FileMode(0o555) - -func init() { - graphdriver.MustRegister("devicemapper", Init) -} - -// Driver contains the device set mounted and the home directory -type Driver struct { - *DeviceSet - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - locker *locker.Locker -} - -// Init creates a driver with the given home and the set of options. -func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - d := &Driver{ - DeviceSet: deviceSet, - home: home, - uidMaps: options.UIDMaps, - gidMaps: options.GIDMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - locker: locker.New(), - } - return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -// Status returns the status about the driver in a printable format. -// Information returned contains Pool Name, Data File, Metadata file, disk usage by -// the data and metadata, etc. 
-func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, - {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, - {"Backing Filesystem", s.BaseDeviceFS}, - {"Data file", s.DataFile}, - {"Metadata file", s.MetadataFile}, - {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, - {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, - {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, - {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, - {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, - {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, - {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, - {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, - } - if len(s.DataLoopback) > 0 { - status = append(status, [2]string{"Data loop file", s.DataLoopback}) - } - if len(s.MetadataLoopback) > 0 { - status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) - } - if vStr, err := devicemapper.GetLibraryVersion(); err == nil { - status = append(status, [2]string{"Library Version", vStr}) - } - return status -} - -// Metadata returns a map of information about the device. -func (d *Driver) Metadata(id string) (map[string]string, error) { - m, err := d.DeviceSet.exportDeviceMetadata(id) - if err != nil { - return nil, err - } - - metadata := make(map[string]string) - metadata["DeviceId"] = strconv.Itoa(m.deviceID) - metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) - metadata["DeviceName"] = m.deviceName - return metadata, nil -} - -// Cleanup unmounts a device. -func (d *Driver) Cleanup() error { - err := d.DeviceSet.Shutdown(d.home) - - umountErr := mount.Unmount(d.home) - // in case we have two errors, prefer the one from Shutdown() - if err != nil { - return err - } - - return umountErr -} - -// CreateFromTemplate creates a layer with the same contents and parent as another layer. -func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { - return d.Create(id, template, opts) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { - return err - } - - return nil -} - -// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. 
-func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return fmt.Errorf("failed to remove device %s: %v", id, err) - } - - // Most probably the mount point is already removed on Put() - // (see DeviceSet.UnmountDevice()), but just in case it was not - // let's try to remove it here as well, ignoring errors as - // an older kernel can return EBUSY if e.g. the mount was leaked - // to other mount namespaces. A failure to remove the container's - // mount point is not important and should not be treated - // as a failure to remove the container. - mp := path.Join(d.home, "mnt", id) - err := unix.Rmdir(mp) - if err != nil && !os.IsNotExist(err) { - logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) - } - - return nil -} - -// Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - rootFs := path.Join(mp, "rootfs") - if count := d.ctr.Increment(mp); count > 1 { - return rootFs, nil - } - - uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mp) - return "", err - } - - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil { - d.ctr.Decrement(mp) - return "", err - } - if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return "", err - } - - // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { - d.ctr.Decrement(mp) - return "", err - } - - if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - - idFile := path.Join(mp, "id") - if err := fileutils.Exists(idFile); err != nil && os.IsNotExist(err) { - // Create an "id" file with the container/image id in it to help reconstruct this in case - // of later problems - if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - } - - return rootFs, nil -} - -// Put unmounts a device and removes it. -func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - if count := d.ctr.Decrement(mp); count > 0 { - return nil - } - - err := d.DeviceSet.UnmountDevice(id, mp) - if err != nil { - logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err) - } - - return err -} - -// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. -// For devmapper, it queries the mnt path for this ID. -func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - return directory.Usage(path.Join(d.home, "mnt", id)) -} - -// Exists checks to see if the device exists. 
-func (d *Driver) Exists(id string) bool { - return d.DeviceSet.HasDevice(id) -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go deleted file mode 100644 index 52f0e863e5..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import jsoniter "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go deleted file mode 100644 index db903ca552..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. -func Mounted(mountpoint string) (bool, error) { - var mntpointSt unix.Stat_t - if err := unix.Stat(mountpoint, &mntpointSt); err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - var parentSt unix.Stat_t - if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { - return false, err - } - return mntpointSt.Dev != parentSt.Dev, nil -} - -type probeData struct { - fsName string - magic string - offset uint64 -} - -// ProbeFsType returns the filesystem name for the given device id. 
-func ProbeFsType(device string) (string, error) { - probes := []probeData{ - {"btrfs", "_BHRfS_M", 0x10040}, - {"ext4", "\123\357", 0x438}, - {"xfs", "XFSB", 0}, - } - - maxLen := uint64(0) - for _, p := range probes { - l := p.offset + uint64(len(p.magic)) - if l > maxLen { - maxLen = l - } - } - - file, err := os.Open(device) - if err != nil { - return "", err - } - defer file.Close() - - buffer := make([]byte, maxLen) - l, err := file.Read(buffer) - if err != nil { - return "", err - } - - if uint64(l) != maxLen { - return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) - } - - for _, p := range probes { - if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { - return p.fsName, nil - } - } - - return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) -} - -func joinMountOptions(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return a + "," + b -} diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index cd806b8ff6..ee0fc7bfcb 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -94,8 +94,6 @@ var ( // Slice of drivers that should be used in an order Priority = []string{ "overlay", - // We don't support devicemapper without configuration - // "devicemapper", "aufs", "btrfs", "zfs", diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index baa9d7befe..8f07c23602 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -4,12 +4,14 @@ package overlay import ( + "bytes" "encoding/binary" "errors" "fmt" "os" "os/exec" "path/filepath" + "strings" "sync" "github.com/containers/storage/pkg/chunked/dump" @@ -70,12 +72,18 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com // a scope to close outFd before setting fsverity on the read-only fd. 
defer outFd.Close() + errBuf := &bytes.Buffer{} cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3") cmd.ExtraFiles = []*os.File{outFd} - cmd.Stderr = os.Stderr + cmd.Stderr = errBuf cmd.Stdin = dumpReader if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to convert json to erofs: %w", err) + rErr := fmt.Errorf("failed to convert json to erofs: %w", err) + exitErr := &exec.ExitError{} + if errors.As(err, &exitErr) { + return fmt.Errorf("%w: %s", rErr, strings.TrimSpace(errBuf.String())) + } + return rErr } return nil }() diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go deleted file mode 100644 index a744eaea17..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !exclude_graphdriver_devicemapper && linux && cgo -// +build !exclude_graphdriver_devicemapper,linux,cgo - -package register - -import ( - // register the devmapper graphdriver - _ "github.com/containers/storage/drivers/devmapper" -) diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index be9e45cfd0..43cf1fb5f1 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -111,7 +111,7 @@ func (s *idSet) findAvailable(n int) (*idSet, error) { iterator, cancel := s.iterator() defer cancel() for i := iterator(); n > 0 && i != nil; i = iterator() { - i.end = minInt(i.end, i.start+n) + i.end = min(i.end, i.start+n) intervals = append(intervals, *i) n -= i.length() } @@ -129,7 +129,7 @@ func (s *idSet) zip(container *idSet) []idtools.IDMap { defer containerCancel() var out []idtools.IDMap for h, c := hostIterator(), containerIterator(); h != nil && c != nil; { - if n := minInt(h.length(), c.length()); n > 0 { + if n := min(h.length(), c.length()); n > 0 { out = append(out, idtools.IDMap{ ContainerID: c.start, HostID: h.start, @@ -159,12 +159,12 @@ type interval struct { } func (i interval) length() int { - return maxInt(0, i.end-i.start) + return max(0, i.end-i.start) } func (i interval) Intersect(other intervalset.Interval) intervalset.Interval { j := other.(interval) - return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)} + return interval{start: max(i.start, j.start), end: min(i.end, j.end)} } func (i interval) Before(other intervalset.Interval) bool { @@ -183,15 +183,15 @@ func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, inte } // Subtracting [j.start, j.end) is equivalent to the union of intersecting (-inf, j.start) and // [j.end, +inf). 
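The composefs change above stops forwarding the writer tool's stderr to the process's own stderr and buffers it instead, so that a non-zero exit can carry the tool's diagnostics inside the returned error. A minimal sketch of the same capture-and-wrap pattern (the command name here is illustrative, not the one the driver invokes):

    package main

    import (
    	"bytes"
    	"errors"
    	"fmt"
    	"os/exec"
    	"strings"
    )

    // runWithCapturedStderr runs a command and, if it exits non-zero,
    // wraps the buffered stderr into the returned error instead of
    // letting it leak onto the daemon's stderr.
    func runWithCapturedStderr(name string, args ...string) error {
    	errBuf := &bytes.Buffer{}
    	cmd := exec.Command(name, args...)
    	cmd.Stderr = errBuf
    	if err := cmd.Run(); err != nil {
    		exitErr := &exec.ExitError{}
    		if errors.As(err, &exitErr) {
    			// only an ExitError has meaningful stderr to report
    			return fmt.Errorf("%s failed: %w: %s", name, err, strings.TrimSpace(errBuf.String()))
    		}
    		return fmt.Errorf("%s failed: %w", name, err)
    	}
    	return nil
    }

    func main() {
    	if err := runWithCapturedStderr("false"); err != nil {
    		fmt.Println(err)
    	}
    }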
- left := interval{start: i.start, end: minInt(i.end, j.start)} - right := interval{start: maxInt(i.start, j.end), end: i.end} + left := interval{start: i.start, end: min(i.end, j.start)} + right := interval{start: max(i.start, j.end), end: i.end} return left, right } func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval { j := other.(interval) if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) { - return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + return interval{start: min(i.start, j.start), end: max(i.end, j.end)} } return interval{} } @@ -204,24 +204,10 @@ func (i interval) Encompass(other intervalset.Interval) intervalset.Interval { case j.IsZero(): return i default: - return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + return interval{start: min(i.start, j.start), end: max(i.end, j.end)} } } -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a < b { - return b - } - return a -} - func hasOverlappingRanges(mappings []idtools.IDMap) error { hostIntervals := intervalset.Empty() containerIntervals := intervalset.Empty() diff --git a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go new file mode 100644 index 0000000000..45d76ec30e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go @@ -0,0 +1,87 @@ +package chunked + +import ( + "encoding/binary" + "hash/crc32" + "io" +) + +type bloomFilter struct { + bitArray []uint64 + k uint32 +} + +func newBloomFilter(size int, k uint32) *bloomFilter { + numElements := (size + 63) / 64 + if numElements == 0 { + numElements = 1 + } + return &bloomFilter{ + bitArray: make([]uint64, numElements), + k: k, + } +} + +func newBloomFilterFromArray(bitArray []uint64, k uint32) *bloomFilter { + return &bloomFilter{ + bitArray: bitArray, + k: k, + } +} + +func (bf *bloomFilter) hashFn(item []byte, seed uint32) (uint64, uint64) { + if len(item) == 0 { + return 0, 1 + } + mod := uint32(len(bf.bitArray) * 64) + seedSplit := seed % uint32(len(item)) + hash := (crc32.ChecksumIEEE(item[:seedSplit]) ^ crc32.ChecksumIEEE(item[seedSplit:])) % mod + return uint64(hash / 64), uint64(1 << (hash % 64)) +} + +func (bf *bloomFilter) add(item []byte) { + for i := uint32(0); i < bf.k; i++ { + index, mask := bf.hashFn(item, i) + bf.bitArray[index] |= mask + } +} + +func (bf *bloomFilter) maybeContains(item []byte) bool { + for i := uint32(0); i < bf.k; i++ { + index, mask := bf.hashFn(item, i) + if bf.bitArray[index]&mask == 0 { + return false + } + } + return true +} + +func (bf *bloomFilter) writeTo(writer io.Writer) error { + if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil { + return err + } + if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil { + return err + } + if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil { + return err + } + return nil +} + +func readBloomFilter(reader io.Reader) (*bloomFilter, error) { + var bloomFilterLen uint64 + var k uint32 + + if err := binary.Read(reader, binary.LittleEndian, &bloomFilterLen); err != nil { + return nil, err + } + if err := binary.Read(reader, binary.LittleEndian, &k); err != nil { + return nil, err + } + bloomFilterArray := make([]uint64, bloomFilterLen) + if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil { + return nil, err + } + return 
newBloomFilterFromArray(bloomFilterArray, k), nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 3393af6685..a9983c90f9 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -3,17 +3,16 @@ package chunked import ( "bytes" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" "os" "runtime" "sort" - "strconv" "strings" "sync" "time" - "unsafe" storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" @@ -27,16 +26,24 @@ import ( const ( cacheKey = "chunked-manifest-cache" - cacheVersion = 2 + cacheVersion = 3 digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Using 3 hash functions and n/m = 10 gives a false positive rate of ~1.7%: + // https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html + bloomFilterScale = 10 // how much bigger is the bloom filter than the number of entries + bloomFilterHashes = 3 // number of hash functions for the bloom filter ) type cacheFile struct { - tagLen int - digestLen int - tags []byte - vdata []byte + tagLen int + digestLen int + fnamesLen int + tags []byte + vdata []byte + fnames []byte + bloomFilter *bloomFilter } type layer struct { @@ -154,6 +161,23 @@ fallback: return buf, nil, err } +func makeBinaryDigest(stringDigest string) ([]byte, error) { + d, err := digest.Parse(stringDigest) + if err != nil { + return nil, err + } + digestBytes, err := hex.DecodeString(d.Encoded()) + if err != nil { + return nil, err + } + algo := []byte(d.Algorithm()) + buf := make([]byte, 0, len(algo)+1+len(digestBytes)) + buf = append(buf, algo...) + buf = append(buf, ':') + buf = append(buf, digestBytes...) + return buf, nil +} + func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey) if err != nil && !errors.Is(err, os.ErrNotExist) { @@ -175,6 +199,8 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { return c.createLayer(layerID, cacheFile, mmapBuffer) } +// createCacheFileFromTOC attempts to create a cache file for the specified layer. +// If a TOC is not available, the cache won't be created and nil is returned. func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) { clFile, err := c.store.LayerBigData(layerID, chunkedLayerDataKey) if err != nil && !errors.Is(err, os.ErrNotExist) { @@ -195,6 +221,10 @@ func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) { } manifestReader, err := c.store.LayerBigData(layerID, bigDataKey) if err != nil { + // the cache file is not needed since there is no manifest file. + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } return nil, err } defer manifestReader.Close() @@ -244,7 +274,7 @@ func (c *layersCache) load() error { // try to read the existing cache file.
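The new bloom filter lets the lookup path skip layers whose cache file cannot contain a digest: with bloomFilterHashes = 3 and bloomFilterScale = 10, the linked analysis gives roughly a 1.7% false-positive rate, and a negative answer is always definitive. A sketch of the intended query order, assuming it lives in the same package as the filter (lookupSketch itself is illustrative, not part of the patch):

    // lookupSketch consults the per-layer bloom filter first, and only on
    // a (possibly false) positive falls through to the binary search over
    // the sorted fixed-width tag records.
    func lookupSketch(cf *cacheFile, binaryDigest []byte) (found bool, off, length uint64) {
    	if !cf.bloomFilter.maybeContains(binaryDigest) {
    		return false, 0, 0 // definitive: the filter has no false negatives
    	}
    	return findBinaryTag(binaryDigest, cf)
    }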
l, err := c.loadLayerCache(r.ID) if err != nil { - logrus.Warningf("Error loading cache file for layer %q: %v", r.ID, err) + logrus.Infof("Error loading cache file for layer %q: %v", r.ID, err) } if l != nil { newLayers = append(newLayers, l) @@ -303,16 +333,46 @@ func calculateHardLinkFingerprint(f *fileMetadata) (string, error) { return string(digester.Digest()), nil } -// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH -func generateFileLocation(path string, offset, len uint64) []byte { - return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path)) +// generateFileLocation generates a file location in the form $PATH_POS$OFFSET$LEN +func generateFileLocation(pathPos int, offset, len uint64) []byte { + var buf []byte + + buf = binary.AppendUvarint(buf, uint64(pathPos)) + buf = binary.AppendUvarint(buf, offset) + buf = binary.AppendUvarint(buf, len) + + return buf +} + +// parseFileLocation reads what was written by generateFileLocation. +func parseFileLocation(locationData []byte) (int, uint64, uint64, error) { + reader := bytes.NewReader(locationData) + + pathPos, err := binary.ReadUvarint(reader) + if err != nil { + return 0, 0, 0, err + } + + offset, err := binary.ReadUvarint(reader) + if err != nil { + return 0, 0, 0, err + } + + len, err := binary.ReadUvarint(reader) + if err != nil { + return 0, 0, 0, err + } + + return int(pathPos), offset, len, nil } -// generateTag generates a tag in the form $DIGEST$OFFSET@LEN. -// the [OFFSET; LEN] points to the variable length data where the file locations +// appendTag appends the $OFFSET$LEN information to the provided $DIGEST. +// The [OFFSET; LEN] points to the variable length data where the file locations // are stored. $DIGEST has length digestLen stored in the cache file file header.
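The location records are no longer "$OFFSET:$LEN:$PATH" strings; they are three uvarints, the first of which points into the shared file-name blob. A self-contained round trip with the standard library, mirroring the encode/decode order above (the values are arbitrary):

    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// encode: pathPos, offset, length, the same order as generateFileLocation
    	var buf []byte
    	buf = binary.AppendUvarint(buf, 42)   // position into the fnames blob
    	buf = binary.AppendUvarint(buf, 4096) // chunk offset
    	buf = binary.AppendUvarint(buf, 512)  // chunk length

    	// decode in the same order, as parseFileLocation does
    	r := bytes.NewReader(buf)
    	pathPos, _ := binary.ReadUvarint(r)
    	offset, _ := binary.ReadUvarint(r)
    	length, _ := binary.ReadUvarint(r)
    	fmt.Println(pathPos, offset, length) // 42 4096 512
    }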
-func generateTag(digest string, offset, len uint64) string { - return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) +func appendTag(digest []byte, offset, len uint64) ([]byte, error) { + digest = binary.LittleEndian.AppendUint64(digest, offset) + digest = binary.LittleEndian.AppendUint64(digest, len) + return digest, nil } type setBigData interface { @@ -320,6 +380,77 @@ type setBigData interface { SetLayerBigData(id, key string, data io.Reader) error } +func bloomFilterFromTags(tags [][]byte, digestLen int) *bloomFilter { + bloomFilter := newBloomFilter(len(tags)*bloomFilterScale, bloomFilterHashes) + for _, t := range tags { + bloomFilter.add(t[:digestLen]) + } + return bloomFilter +} + +func writeCacheFileToWriter(writer io.Writer, bloomFilter *bloomFilter, tags [][]byte, tagLen, digestLen int, vdata, fnames bytes.Buffer, tagsBuffer *bytes.Buffer) error { + sort.Slice(tags, func(i, j int) bool { + return bytes.Compare(tags[i], tags[j]) == -1 + }) + for _, t := range tags { + if _, err := tagsBuffer.Write(t); err != nil { + return err + } + } + + // version + if err := binary.Write(writer, binary.LittleEndian, uint64(cacheVersion)); err != nil { + return err + } + + // len of a tag + if err := binary.Write(writer, binary.LittleEndian, uint64(tagLen)); err != nil { + return err + } + + // len of a digest + if err := binary.Write(writer, binary.LittleEndian, uint64(digestLen)); err != nil { + return err + } + + // bloom filter + if err := bloomFilter.writeTo(writer); err != nil { + return err + } + + // tags length + if err := binary.Write(writer, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + return err + } + + // vdata length + if err := binary.Write(writer, binary.LittleEndian, uint64(vdata.Len())); err != nil { + return err + } + + // fnames length + if err := binary.Write(writer, binary.LittleEndian, uint64(fnames.Len())); err != nil { + return err + } + + // tags + if _, err := writer.Write(tagsBuffer.Bytes()); err != nil { + return err + } + + // variable length data + if _, err := writer.Write(vdata.Bytes()); err != nil { + return err + } + + // file names + if _, err := writer.Write(fnames.Bytes()); err != nil { + return err + } + + return nil +} + // writeCache write a cache for the layer ID. // It generates a sorted list of digests with their offset to the path location and offset. // The same cache is used to lookup files, chunks and candidates for deduplication with hard links. 
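writeCacheFileToWriter above fixes the on-disk order of the version-3 cache file, which readCacheFileFromMemory below reads back. As a summary (this layout note is editorial, derived from the code, not part of the patch), all integers are little-endian and the sections appear in this order:

    u64 version             // cacheVersion (3)
    u64 tagLen              // size of one fixed-width tag record
    u64 digestLen           // length of the binary-digest prefix of each tag
    u64 bloomFilterLen      // bloom filter: number of 64-bit words...
    u32 k                   // ...and number of hash functions
    u64[bloomFilterLen]     // the filter's bit array
    u64 tagsLen             // byte length of the sorted tag records
    u64 vdataLen            // byte length of the location records
    u64 fnamesLen           // byte length of the file-name blob
    tags, vdata, fnames     // the three variable-length sections

Each tag record is the binary digest followed by two u64 values (offset and length into vdata), which is why the tags blob can be searched as a fixed-width array.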
@@ -328,53 +459,98 @@ type setBigData interface { // - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) // - digest(i) for each i in chunks(file payload) func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) { - var vdata bytes.Buffer + var vdata, tagsBuffer, fnames bytes.Buffer tagLen := 0 digestLen := 0 - var tagsBuffer bytes.Buffer toc, err := prepareCacheFile(manifest, format) if err != nil { return nil, err } - var tags []string + fnamesMap := make(map[string]int) + getFileNamePosition := func(name string) (int, error) { + if pos, found := fnamesMap[name]; found { + return pos, nil + } + pos := fnames.Len() + fnamesMap[name] = pos + + if err := binary.Write(&fnames, binary.LittleEndian, uint32(len(name))); err != nil { + return 0, err + } + if _, err := fnames.WriteString(name); err != nil { + return 0, err + } + return pos, nil + } + + var tags [][]byte for _, k := range toc { if k.Digest != "" { - location := generateFileLocation(k.Name, 0, uint64(k.Size)) - + digest, err := makeBinaryDigest(k.Digest) + if err != nil { + return nil, err + } + fileNamePos, err := getFileNamePosition(k.Name) + if err != nil { + return nil, err + } + location := generateFileLocation(fileNamePos, 0, uint64(k.Size)) off := uint64(vdata.Len()) l := uint64(len(location)) - d := generateTag(k.Digest, off, l) + tag, err := appendTag(digest, off, l) + if err != nil { + return nil, err + } if tagLen == 0 { - tagLen = len(d) + tagLen = len(tag) } - if tagLen != len(d) { + if tagLen != len(tag) { return nil, errors.New("digest with different length found") } - tags = append(tags, d) + tags = append(tags, tag) fp, err := calculateHardLinkFingerprint(k) if err != nil { return nil, err } - d = generateTag(fp, off, l) - if tagLen != len(d) { + digestHardLink, err := makeBinaryDigest(fp) + if err != nil { + return nil, err + } + tag, err = appendTag(digestHardLink, off, l) + if err != nil { + return nil, err + } + if tagLen != len(tag) { return nil, errors.New("digest with different length found") } - tags = append(tags, d) + tags = append(tags, tag) if _, err := vdata.Write(location); err != nil { return nil, err } - digestLen = len(k.Digest) + digestLen = len(digestHardLink) } if k.ChunkDigest != "" { - location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize)) + fileNamePos, err := getFileNamePosition(k.Name) + if err != nil { + return nil, err + } + location := generateFileLocation(fileNamePos, uint64(k.ChunkOffset), uint64(k.ChunkSize)) off := uint64(vdata.Len()) l := uint64(len(location)) - d := generateTag(k.ChunkDigest, off, l) + + digest, err := makeBinaryDigest(k.ChunkDigest) + if err != nil { + return nil, err + } + d, err := appendTag(digest, off, l) + if err != nil { + return nil, err + } if tagLen == 0 { tagLen = len(d) } @@ -386,17 +562,11 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin if _, err := vdata.Write(location); err != nil { return nil, err } - digestLen = len(k.ChunkDigest) + digestLen = len(digest) } } - sort.Strings(tags) - - for _, t := range tags { - if _, err := tagsBuffer.Write([]byte(t)); err != nil { - return nil, err - } - } + bloomFilter := bloomFilterFromTags(tags, digestLen) pipeReader, pipeWriter := io.Pipe() errChan := make(chan error, 1) @@ -404,49 +574,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin defer pipeWriter.Close() defer close(errChan) - // version - if err := 
binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { - errChan <- err - return - } - - // len of a tag - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { - errChan <- err - return - } - - // len of a digest - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { - errChan <- err - return - } - - // tags length - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { - errChan <- err - return - } - - // vdata length - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { - errChan <- err - return - } - - // tags - if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { - errChan <- err - return - } - - // variable length data - if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { - errChan <- err - return - } - - errChan <- nil + errChan <- writeCacheFileToWriter(pipeWriter, bloomFilter, tags, tagLen, digestLen, vdata, fnames, &tagsBuffer) }() defer pipeReader.Close() @@ -465,17 +593,20 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) return &cacheFile{ - digestLen: digestLen, - tagLen: tagLen, - tags: tagsBuffer.Bytes(), - vdata: vdata.Bytes(), + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + fnames: fnames.Bytes(), + fnamesLen: len(fnames.Bytes()), + bloomFilter: bloomFilter, }, nil } func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { bigData := bytes.NewReader(bigDataBuffer) - var version, tagLen, digestLen, tagsLen, vdataLen uint64 + var version, tagLen, digestLen, tagsLen, fnamesLen, vdataLen uint64 if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil { return nil, err } @@ -488,6 +619,12 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { return nil, err } + + bloomFilter, err := readBloomFilter(bigData) + if err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { return nil, err } @@ -495,19 +632,28 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { return nil, err } + if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil { + return nil, err + } tags := make([]byte, tagsLen) if _, err := bigData.Read(tags); err != nil { return nil, err } // retrieve the unread part of the buffer. 
- vdata := bigDataBuffer[len(bigDataBuffer)-bigData.Len():] + remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():] + + vdata := remaining[:vdataLen] + fnames := remaining[vdataLen:] return &cacheFile{ - tagLen: int(tagLen), - digestLen: int(digestLen), - tags: tags, - vdata: vdata, + bloomFilter: bloomFilter, + digestLen: int(digestLen), + fnames: fnames, + fnamesLen: int(fnamesLen), + tagLen: int(tagLen), + tags: tags, + vdata: vdata, }, nil } @@ -574,34 +720,32 @@ func (c *layersCache) createLayer(id string, cacheFile *cacheFile, mmapBuffer [] return l, nil } -func byteSliceAsString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -func findTag(digest string, cacheFile *cacheFile) (string, uint64, uint64) { - if len(digest) != cacheFile.digestLen { - return "", 0, 0 - } - +func findBinaryTag(binaryDigest []byte, cacheFile *cacheFile) (bool, uint64, uint64) { nElements := len(cacheFile.tags) / cacheFile.tagLen i := sort.Search(nElements, func(i int) bool { - d := byteSliceAsString(cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen]) - return strings.Compare(d, digest) >= 0 + d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen] + return bytes.Compare(d, binaryDigest) >= 0 }) if i < nElements { - d := string(cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+len(digest)]) - if digest == d { + d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen] + if bytes.Equal(binaryDigest, d) { startOff := i*cacheFile.tagLen + cacheFile.digestLen - parts := strings.Split(string(cacheFile.tags[startOff:(i+1)*cacheFile.tagLen]), "@") - off, _ := strconv.ParseInt(parts[0], 10, 64) + // check for corrupted data, there must be 2 u64 (off and len) after the digest. 
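findBinaryTag treats cacheFile.tags as a sorted array of fixed-width records keyed by their binary-digest prefix, so sort.Search can locate a digest in O(log n) without the string materialization and strconv parsing the old code needed. The technique in isolation, with a toy record layout:

    package main

    import (
    	"bytes"
    	"fmt"
    	"sort"
    )

    // findRecord binary-searches a blob of fixed-width records whose first
    // keyLen bytes are the sort key, returning the record index or -1.
    func findRecord(blob []byte, recLen, keyLen int, key []byte) int {
    	n := len(blob) / recLen
    	i := sort.Search(n, func(i int) bool {
    		return bytes.Compare(blob[i*recLen:i*recLen+keyLen], key) >= 0
    	})
    	if i < n && bytes.Equal(blob[i*recLen:i*recLen+keyLen], key) {
    		return i
    	}
    	return -1
    }

    func main() {
    	// three 4-byte records with 2-byte keys, already sorted by key
    	blob := []byte("aa01bb02cc03")
    	fmt.Println(findRecord(blob, 4, 2, []byte("bb"))) // 1
    	fmt.Println(findRecord(blob, 4, 2, []byte("zz"))) // -1
    }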
+ if cacheFile.tagLen < cacheFile.digestLen+16 { + return false, 0, 0 + } + + offsetAndLen := cacheFile.tags[startOff : (i+1)*cacheFile.tagLen] - len, _ := strconv.ParseInt(parts[1], 10, 64) - return digest, uint64(off), uint64(len) + off := binary.LittleEndian.Uint64(offsetAndLen[:8]) + len := binary.LittleEndian.Uint64(offsetAndLen[8:16]) + + return true, off, len } } - return "", 0, 0 + return false, 0, 0 } func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { @@ -609,20 +753,42 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, return "", "", -1, nil } + binaryDigest, err := makeBinaryDigest(digest) + if err != nil { + return "", "", 0, err + } + c.mutex.RLock() defer c.mutex.RUnlock() for _, layer := range c.layers { - digest, off, tagLen := findTag(digest, layer.cacheFile) - if digest != "" { - position := string(layer.cacheFile.vdata[off : off+tagLen]) - parts := strings.SplitN(position, ":", 3) - if len(parts) != 3 { - continue + if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) { + continue + } + found, off, tagLen := findBinaryTag(binaryDigest, layer.cacheFile) + if found { + if uint64(len(layer.cacheFile.vdata)) < off+tagLen { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) } - offFile, _ := strconv.ParseInt(parts[0], 10, 64) + fileLocationData := layer.cacheFile.vdata[off : off+tagLen] + + fnamePosition, offFile, _, err := parseFileLocation(fileLocationData) + if err != nil { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) + } + + if len(layer.cacheFile.fnames) < fnamePosition+4 { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) + } + lenPath := int(binary.LittleEndian.Uint32(layer.cacheFile.fnames[fnamePosition : fnamePosition+4])) + + if len(layer.cacheFile.fnames) < fnamePosition+lenPath+4 { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) + } + path := string(layer.cacheFile.fnames[fnamePosition+4 : fnamePosition+lenPath+4]) + // parts[1] is the chunk length, currently unused. 
- return layer.target, parts[2], offFile, nil + return layer.target, path, int64(offFile), nil } } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 112ca2c7c4..3499acbd18 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -7,7 +7,6 @@ import ( "io" "strconv" - "github.com/containerd/stargz-snapshotter/estargz" "github.com/containers/storage/pkg/chunked/internal" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" @@ -33,7 +32,7 @@ func typeToTarType(t string) (byte, error) { return r, nil } -func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) { // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md footerSize := int64(51) if blobSize <= footerSize { @@ -126,88 +125,51 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, return nil, 0, err } - d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) - if err != nil { - return nil, 0, err - } - if manifestDigester.Digest() != d { + if manifestDigester.Digest() != tocDigest { return nil, 0, errors.New("invalid manifest checksum") } return manifestUncompressed, tocOffset, nil } -// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must -// be specified. -// This function uses the io.github.containers.zstd-chunked. annotations when specified. -func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { - footerSize := int64(internal.FooterSizeSupported) - if blobSize <= footerSize { - return nil, nil, 0, errors.New("blob too small") +// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. 
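With the footer reads removed, readZstdChunkedManifest locates the manifest purely from layer annotations: the internal.ManifestInfoKey value packs offset, compressed length, uncompressed length, and manifest type as a colon-separated quadruple, parsed below with fmt.Sscanf. A standalone illustration of that parse (the annotation value here is made up):

    package main

    import "fmt"

    func main() {
    	// illustrative annotation value: offset, compressed len,
    	// uncompressed len, manifest type
    	offsetMetadata := "1234:567:2048:1"

    	var offset, lengthCompressed, lengthUncompressed, manifestType uint64
    	if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d",
    		&offset, &lengthCompressed, &lengthUncompressed, &manifestType); err != nil {
    		panic(err)
    	}
    	fmt.Println(offset, lengthCompressed, lengthUncompressed, manifestType)
    }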
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, []byte, int64, error) { + offsetMetadata := annotations[internal.ManifestInfoKey] + if offsetMetadata == "" { + return nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey) } - - var footerData internal.ZstdChunkedFooterData - - if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { - var err error - footerData, err = internal.ReadFooterDataFromAnnotations(annotations) - if err != nil { - return nil, nil, 0, err - } - } else { - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, nil, 0, err - } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, nil, 0, err - } - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, nil, 0, err - } - - footerData, err = internal.ReadFooterDataFromBlob(footer) - if err != nil { + var manifestChunk ImageSourceChunk + var manifestLengthUncompressed, manifestType uint64 + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil { + return nil, nil, 0, err + } + // The tarSplit… values are valid if tarSplitChunk.Offset > 0 + var tarSplitChunk ImageSourceChunk + var tarSplitLengthUncompressed uint64 + var tarSplitChecksum string + if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { + if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil { return nil, nil, 0, err } + tarSplitChecksum = annotations[internal.TarSplitChecksumKey] } - if footerData.ManifestType != internal.ManifestTypeCRFS { + if manifestType != internal.ManifestTypeCRFS { return nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit - if footerData.LengthCompressed > (1<<20)*50 { + if manifestChunk.Length > (1<<20)*50 { return nil, nil, 0, errors.New("manifest too big") } - if footerData.LengthUncompressed > (1<<20)*50 { + if manifestLengthUncompressed > (1<<20)*50 { return nil, nil, 0, errors.New("manifest too big") } - chunk := ImageSourceChunk{ - Offset: footerData.Offset, - Length: footerData.LengthCompressed, - } - - chunks := []ImageSourceChunk{chunk} - - if footerData.OffsetTarSplit > 0 { - chunkTarSplit := ImageSourceChunk{ - Offset: footerData.OffsetTarSplit, - Length: footerData.LengthCompressedTarSplit, - } - chunks = append(chunks, chunkTarSplit) + chunks := []ImageSourceChunk{manifestChunk} + if tarSplitChunk.Offset > 0 { + chunks = append(chunks, tarSplitChunk) } - parts, errs, err := blobStream.GetBlobAt(chunks) if err != nil { return nil, nil, 0, err @@ -233,28 +195,28 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann return blob, nil } - manifest, err := readBlob(footerData.LengthCompressed) + manifest, err := readBlob(manifestChunk.Length) if err != nil { return nil, nil, 0, err } - decodedBlob, err := decodeAndValidateBlob(manifest, footerData.LengthUncompressed, footerData.ChecksumAnnotation) + decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String()) if err != nil { return nil, nil, 0, err } decodedTarSplit := []byte{} - if 
footerData.OffsetTarSplit > 0 { - tarSplit, err := readBlob(footerData.LengthCompressedTarSplit) + if tarSplitChunk.Offset > 0 { + tarSplit, err := readBlob(tarSplitChunk.Length) if err != nil { return nil, nil, 0, err } - decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit) + decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, tarSplitChecksum) if err != nil { return nil, nil, 0, err } } - return decodedBlob, decodedTarSplit, int64(footerData.Offset), err + return decodedBlob, decodedTarSplit, int64(manifestChunk.Offset), err } func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index d3c105c4d5..701b6aa53b 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -52,7 +52,7 @@ func escaped(val string, escape int) string { if noescapeSpace { hexEscape = !unicode.IsPrint(rune(c)) } else { - hexEscape = !unicode.IsGraphic(rune(c)) + hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c)) } } diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index 1136d67b80..52c9ce341a 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,7 +8,6 @@ import ( "archive/tar" "bytes" "encoding/binary" - "errors" "fmt" "io" "time" @@ -183,11 +182,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off Offset: manifestOffset, LengthCompressed: uint64(len(compressedManifest)), LengthUncompressed: uint64(len(manifest)), - ChecksumAnnotation: "", // unused OffsetTarSplit: uint64(tarSplitOffset), LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), - ChecksumAnnotationTarSplit: "", // unused } manifestDataLE := footerDataToBlob(footer) @@ -201,18 +198,22 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { } // ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. +// This footer exists to make the blobs self-describing, our implementation +// never reads it: +// Partial pull security hinges on the TOC digest, and that exists as a layer annotation; +// so we are relying on the layer annotations anyway, and doing so means we can avoid +// a round-trip to fetch this binary footer. type ZstdChunkedFooterData struct { ManifestType uint64 Offset uint64 LengthCompressed uint64 LengthUncompressed uint64 - ChecksumAnnotation string // Only used when reading a layer, not when creating it OffsetTarSplit uint64 LengthCompressedTarSplit uint64 LengthUncompressedTarSplit uint64 - ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it + ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose. 
} func footerDataToBlob(footer ZstdChunkedFooterData) []byte { @@ -229,49 +230,3 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte { return manifestDataLE } - -// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations. -func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) { - var footerData ZstdChunkedFooterData - - footerData.ChecksumAnnotation = annotations[ManifestChecksumKey] - if footerData.ChecksumAnnotation == "" { - return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey) - } - - offsetMetadata := annotations[ManifestInfoKey] - - if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil { - return footerData, err - } - - if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found { - if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil { - return footerData, err - } - footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey] - } - return footerData, nil -} - -// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer. -func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) { - var footerData ZstdChunkedFooterData - - if len(footer) < FooterSizeSupported { - return footerData, errors.New("blob too small") - } - footerData.Offset = binary.LittleEndian.Uint64(footer[0:8]) - footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16]) - footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) - footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32]) - footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40]) - footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48]) - footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56]) - - // the magic number is stored in the last 8 bytes - if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) { - return footerData, errors.New("invalid magic number") - } - return footerData, nil -} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 7bbfd7bedd..1c4f38663d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/toc" "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" @@ -216,15 +217,15 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, return streams, errs, nil } -func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) { +func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) { diff, err := archive.DecompressStream(payload) if err != nil { - return nil, "", nil, err + return 0, nil, "", nil, err } fd, err := 
unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) if err != nil { - return nil, "", nil, err + return 0, nil, "", nil, err } f := os.NewFile(uintptr(fd), destDirectory) @@ -234,23 +235,24 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableF chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level) if err != nil { f.Close() - return nil, "", nil, err + return 0, nil, "", nil, err } convertedOutputDigester := digest.Canonical.Digester() - if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil { + copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff) + if err != nil { f.Close() - return nil, "", nil, err + return 0, nil, "", nil, err } if err := chunked.Close(); err != nil { f.Close() - return nil, "", nil, err + return 0, nil, "", nil, err } is := seekableFile{ file: f, } - return &is, convertedOutputDigester.Digest(), newAnnotations, nil + return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. @@ -264,18 +266,26 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges return nil, errors.New("enable_partial_images not configured") } - _, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] - _, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] + zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] + estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] if hasZstdChunkedTOC && hasEstargzTOC { return nil, errors.New("both zstd:chunked and eStargz TOC found") } if hasZstdChunkedTOC { - return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString) + if err != nil { + return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err) + } + return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts) } if hasEstargzTOC { - return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + estargzTOCDigest, err := digest.Parse(estargzTOCDigestString) + if err != nil { + return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err) + } + return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts) } return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts) @@ -303,8 +313,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige }, nil } -func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { - manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) +func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -313,11 +323,6 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } - tocDigest, err := 
digest.Parse(annotations[internal.ManifestChecksumKey]) - if err != nil { - return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) - } - return &chunkedDiffer{ fsVerityDigests: make(map[string]string), blobSize: blobSize, @@ -333,8 +338,8 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in }, nil } -func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { - manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) +func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -343,11 +348,6 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } - tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) - if err != nil { - return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) - } - return &chunkedDiffer{ fsVerityDigests: make(map[string]string), blobSize: blobSize, @@ -1653,6 +1653,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff stream := c.stream var uncompressedDigest digest.Digest + var convertedBlobSize int64 if c.convertToZstdChunked { fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) @@ -1680,10 +1681,11 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return graphdriver.DriverWithDifferOutput{}, err } - fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) + tarSize, fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } + convertedBlobSize = tarSize // fileSource is a O_TMPFILE file descriptor, so we // need to keep it open until the entire file is processed. 
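convertTarToZstdChunked now also returns the byte count from io.Copy, so ApplyDiff can record the size of the original (converted) tar stream rather than the TOC-derived total. Hashing and counting in a single pass is the io.MultiWriter pattern used above; a minimal sketch, with io.Discard standing in for the zstd:chunked compressor:

    package main

    import (
    	"fmt"
    	"io"
    	"strings"

    	"github.com/opencontainers/go-digest"
    )

    func main() {
    	src := strings.NewReader("example layer payload")
    	digester := digest.Canonical.Digester()

    	// One pass over the stream: every byte is counted by io.Copy and
    	// fed to the digester while being written to the real destination.
    	copied, err := io.Copy(io.MultiWriter(io.Discard, digester.Hash()), src)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(copied, digester.Digest())
    }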
defer fileSource.Close() @@ -1692,7 +1694,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff blobFile.Close() blobFile = nil - manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) + tocDigest, err := toc.GetTOCDigest(annotations) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: parsing just-created zstd:chunked TOC digest: %w", err) + } + if tocDigest == nil { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest") + } + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -1753,13 +1762,19 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff var missingParts []missingPart - mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries) + mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { return output, err } output.UIDs, output.GIDs = collectIDs(mergedEntries) - output.Size = totalSize + if convertedBlobSize > 0 { + // if the image was converted, store the original tar size, so that + // it can be recreated correctly. + output.Size = convertedBlobSize + } else { + output.Size = totalSizeFromTOC + } if err := maybeDoIDRemap(mergedEntries, options); err != nil { return output, err diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index febe8a0c5a..dc70531de9 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -5,72 +5,6 @@ import ( "os" ) -// ThinpoolOptionsConfig represents the "storage.options.thinpool" -// TOML config table. -type ThinpoolOptionsConfig struct { - // AutoExtendPercent determines the amount by which pool needs to be - // grown. This is specified in terms of % of pool size. So a value of - // 20 means that when threshold is hit, pool will be grown by 20% of - // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent,omitempty"` - - // AutoExtendThreshold determines the pool extension threshold in terms - // of percentage of pool size. For example, if threshold is 60, that - // means when pool is 60% full, threshold has been hit. - AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"` - - // BaseSize specifies the size to use when creating the base device, - // which limits the size of images and containers. - BaseSize string `toml:"basesize,omitempty"` - - // BlockSize specifies a custom blocksize to use for the thin pool. - BlockSize string `toml:"blocksize,omitempty"` - - // DirectLvmDevice specifies a custom block storage device to use for - // the thin pool. - DirectLvmDevice string `toml:"directlvm_device,omitempty"` - - // DirectLvmDeviceForcewipes device even if device already has a - // filesystem - DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"` - - // Fs specifies the filesystem type to use for the base device. - Fs string `toml:"fs,omitempty"` - - // log_level sets the log level of devicemapper. - LogLevel string `toml:"log_level,omitempty"` - - // MetadataSize specifies the size of the metadata for the thinpool - // It will be used with the `pvcreate --metadata` option. 
- MetadataSize string `toml:"metadatasize,omitempty"` - - // MinFreeSpace specifies the min free space percent in a thin pool - // require for new device creation to - MinFreeSpace string `toml:"min_free_space,omitempty"` - - // MkfsArg specifies extra mkfs arguments to be used when creating the - // basedevice. - MkfsArg string `toml:"mkfsarg,omitempty"` - - // MountOpt specifies extra mount options used when mounting the thin - // devices. - MountOpt string `toml:"mountopt,omitempty"` - - // Size - Size string `toml:"size,omitempty"` - - // UseDeferredDeletion marks device for deferred deletion - UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"` - - // UseDeferredRemoval marks device for deferred removal - UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"` - - // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of - // retries XFS should attempt to complete IO when ENOSPC (no space) - // error is returned by underlying storage device. - XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"` -} - type AufsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting MountOpt string `toml:"mountopt,omitempty"` @@ -181,9 +115,6 @@ type OptionsConfig struct { // Btrfs container options to be handed to btrfs drivers Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` - // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` - // Overlay container options to be handed to overlay drivers Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` @@ -231,62 +162,6 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) } - case "devicemapper": - if options.Thinpool.AutoExtendPercent != "" { - doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent)) - } - if options.Thinpool.AutoExtendThreshold != "" { - doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold)) - } - if options.Thinpool.BaseSize != "" { - doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize)) - } - if options.Thinpool.BlockSize != "" { - doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize)) - } - if options.Thinpool.DirectLvmDevice != "" { - doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice)) - } - if options.Thinpool.DirectLvmDeviceForce != "" { - doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce)) - } - if options.Thinpool.Fs != "" { - doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs)) - } - if options.Thinpool.LogLevel != "" { - doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) - } - if options.Thinpool.MetadataSize != "" { - doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize)) - } - if options.Thinpool.MinFreeSpace != "" { - doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) - } - if options.Thinpool.MkfsArg != "" { - doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg)) - } - if options.Thinpool.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, 
options.Thinpool.MountOpt)) - } else if options.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) - } - - if options.Thinpool.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size)) - } else if options.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) - } - - if options.Thinpool.UseDeferredDeletion != "" { - doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion)) - } - if options.Thinpool.UseDeferredRemoval != "" { - doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval)) - } - if options.Thinpool.XfsNoSpaceMaxRetries != "" { - doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries)) - } - case "overlay", "overlay2": // Specify whether composefs must be used to mount the data layers if options.Overlay.IgnoreChownErrors != "" { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go deleted file mode 100644 index 33bf7184e3..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ /dev/null @@ -1,813 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -import ( - "errors" - "fmt" - "os" - "runtime" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Same as DM_DEVICE_* enum values from libdevmapper.h -// nolint: unused -const ( - deviceCreate TaskType = iota - deviceReload - deviceRemove - deviceRemoveAll - deviceSuspend - deviceResume - deviceInfo - deviceDeps - deviceRename - deviceVersion - deviceStatus - deviceTable - deviceWaitevent - deviceList - deviceClear - deviceMknodes - deviceListVersions - deviceTargetMsg - deviceSetGeometry -) - -const ( - addNodeOnResume AddNodeType = iota - addNodeOnCreate -) - -// List of errors returned when using devicemapper. 
-var ( - ErrTaskRun = errors.New("dm_task_run failed") - ErrTaskSetName = errors.New("dm_task_set_name failed") - ErrTaskSetMessage = errors.New("dm_task_set_message failed") - ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") - ErrTaskSetRo = errors.New("dm_task_set_ro failed") - ErrTaskAddTarget = errors.New("dm_task_add_target failed") - ErrTaskSetSector = errors.New("dm_task_set_sector failed") - ErrTaskGetDeps = errors.New("dm_task_get_deps failed") - ErrTaskGetInfo = errors.New("dm_task_get_info failed") - ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") - ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") - ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") - ErrNilCookie = errors.New("cookie ptr can't be nil") - ErrGetBlockSize = errors.New("Can't get block size") - ErrUdevWait = errors.New("wait on udev cookie failed") - ErrSetDevDir = errors.New("dm_set_dev_dir failed") - ErrGetLibraryVersion = errors.New("dm_get_library_version failed") - ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") - ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalid AddNode type") - ErrBusy = errors.New("Device is Busy") - ErrDeviceIDExists = errors.New("Device Id Exists") - ErrEnxio = errors.New("No such device or address") -) - -var ( - dmSawBusy bool - dmSawExist bool - dmSawEnxio bool // No Such Device or Address -) - -type ( - // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl - // command to execute. - Task struct { - unmanaged *cdmTask - } - // Deps represents dependents (layer) of a device. - Deps struct { - Count uint32 - Filler uint32 - Device []uint64 - } - // Info represents information about a device. - Info struct { - Exists int - Suspended int - LiveTable int - InactiveTable int - OpenCount int32 - EventNr uint32 - Major uint32 - Minor uint32 - ReadOnly int - TargetCount int32 - DeferredRemove int - } - // TaskType represents a type of task - TaskType int - // AddNodeType represents a type of node to be added - AddNodeType int -) - -// DeviceIDExists returns whether error conveys the information about device Id already -// exist or not. This will be true if device creation or snap creation -// operation fails if device or snap device already exists in pool. -// Current implementation is little crude as it scans the error string -// for exact pattern match. Replacing it with more robust implementation -// is desirable. 
-func DeviceIDExists(err error) bool { - return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) -} - -func (t *Task) destroy() { - if t != nil { - DmTaskDestroy(t.unmanaged) - runtime.SetFinalizer(t, nil) - } -} - -// TaskCreateNamed is a convenience function for TaskCreate when a name -// will be set on the task as well -func TaskCreateNamed(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) - } - if err := task.setName(name); err != nil { - return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) - } - return task, nil -} - -// TaskCreate initializes a devicemapper task of tasktype -func TaskCreate(tasktype TaskType) *Task { - Ctask := DmTaskCreate(int(tasktype)) - if Ctask == nil { - return nil - } - task := &Task{unmanaged: Ctask} - runtime.SetFinalizer(task, (*Task).destroy) - return task -} - -func (t *Task) run() error { - if res := DmTaskRun(t.unmanaged); res != 1 { - return ErrTaskRun - } - runtime.KeepAlive(t) - return nil -} - -func (t *Task) setName(name string) error { - if res := DmTaskSetName(t.unmanaged, name); res != 1 { - return ErrTaskSetName - } - return nil -} - -func (t *Task) setMessage(message string) error { - if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { - return ErrTaskSetMessage - } - return nil -} - -func (t *Task) setSector(sector uint64) error { - if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { - return ErrTaskSetSector - } - return nil -} - -func (t *Task) setCookie(cookie *uint, flags uint16) error { - if cookie == nil { - return ErrNilCookie - } - if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { - return ErrTaskSetCookie - } - return nil -} - -func (t *Task) setAddNode(addNode AddNodeType) error { - if addNode != addNodeOnResume && addNode != addNodeOnCreate { - return ErrInvalidAddNode - } - if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { - return ErrTaskSetAddNode - } - return nil -} - -func (t *Task) addTarget(start, size uint64, ttype, params string) error { - if res := DmTaskAddTarget(t.unmanaged, start, size, - ttype, params); res != 1 { - return ErrTaskAddTarget - } - return nil -} - -func (t *Task) getDeps() (*Deps, error) { //nolint:unused - var deps *Deps - if deps = DmTaskGetDeps(t.unmanaged); deps == nil { - return nil, ErrTaskGetDeps - } - return deps, nil -} - -func (t *Task) getInfo() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getInfoWithDeferred() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getDriverVersion() (string, error) { - res := DmTaskGetDriverVersion(t.unmanaged) - if res == "" { - return "", ErrTaskGetDriverVersion - } - return res, nil -} - -func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string, -) { - return DmGetNextTarget(t.unmanaged, next, &start, &length, - &targetType, ¶ms), - start, length, targetType, params -} - -// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
-func UdevWait(cookie *uint) error { - if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) - return ErrUdevWait - } - return nil -} - -// SetDevDir sets the dev folder for the device mapper library (usually /dev). -func SetDevDir(dir string) error { - if res := DmSetDevDir(dir); res != 1 { - logrus.Debug("devicemapper: Error dm_set_dev_dir") - return ErrSetDevDir - } - return nil -} - -// GetLibraryVersion returns the device mapper library version. -func GetLibraryVersion() (string, error) { - var version string - if res := DmGetLibraryVersion(&version); res != 1 { - return "", ErrGetLibraryVersion - } - return version, nil -} - -// UdevSyncSupported returns whether device-mapper is able to sync with udev -// -// This is essential otherwise race conditions can arise where both udev and -// device-mapper attempt to create and destroy devices. -func UdevSyncSupported() bool { - return DmUdevGetSyncSupport() != 0 -} - -// UdevSetSyncSupport allows setting whether the udev sync should be enabled. -// The return bool indicates the state of whether the sync is enabled. -func UdevSetSyncSupport(enable bool) bool { - if enable { - DmUdevSetSyncSupport(1) - } else { - DmUdevSetSyncSupport(0) - } - - return UdevSyncSupported() -} - -// CookieSupported returns whether the version of device-mapper supports the -// use of cookie's in the tasks. -// This is largely a lower level call that other functions use. -func CookieSupported() bool { - return DmCookieSupported() != 0 -} - -// RemoveDevice is a useful helper for cleaning up a device. -func RemoveDevice(name string) error { - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - defer UdevWait(cookie) - - dmSawBusy = false // reset before the task is run - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) - } - - return nil -} - -// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. -func RemoveDeviceDeferred(name string) error { - logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) - defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { - return ErrTaskDeferredRemove - } - - // set a task cookie and disable library fallback, or else libdevmapper will - // disable udev dm rules and delete the symlink under /dev/mapper by itself, - // even if the removal is deferred by the kernel. - cookie := new(uint) - flags := uint16(DmUdevDisableLibraryFallback) - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - - // libdevmapper and udev relies on System V semaphore for synchronization, - // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. - // So these two function call must come in pairs, otherwise semaphores will - // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all following calls to 'task.SetCookie' - // fail. 
- // this call will not wait for the deferred removal's final executing, since no - // udev event will be generated, and the semaphore's value will not be incremented - // by udev, what UdevWait is just cleaning up the semaphore. - defer UdevWait(cookie) - - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) - } - - return nil -} - -// CancelDeferredRemove cancels a deferred remove for a device. -func CancelDeferredRemove(deviceName string) error { - task, err := TaskCreateNamed(deviceTargetMsg, deviceName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage("@cancel_deferred_remove"); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - dmSawEnxio = false - if err := task.run(); err != nil { - // A device might be being deleted already - if dmSawBusy { - return ErrBusy - } else if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) - - } - return nil -} - -// GetBlockDeviceSize returns the size of a block device identified by the specified file. -func GetBlockDeviceSize(file *os.File) (uint64, error) { - size, err := ioctlBlkGetSize64(file.Fd()) - if err != nil { - logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) - return 0, ErrGetBlockSize - } - return uint64(size), nil -} - -// BlockDeviceDiscard runs discard for the given path. -// This is used as a workaround for the kernel not discarding block so -// on the thin pool when we remove a thinp device, so we do it -// manually -func BlockDeviceDiscard(path string) error { - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() - - size, err := GetBlockDeviceSize(file) - if err != nil { - return err - } - - if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { - return err - } - - // Without this sometimes the remove of the device that happens after - // discard fails with EBUSY. - unix.Sync() - - return nil -} - -// CreatePool is the programmatic example of "dmsetup create". -// It creates a device with the specified poolName, data and metadata file and block size. -func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - cookie := new(uint) - flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) - } - - return nil -} - -// ReloadPool is the programmatic example of "dmsetup reload". -// It reloads the table with the specified poolName, data and metadata file and block size. 
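The cookie discipline spelled out in the comments above is easy to get wrong. A minimal sketch of the pairing, using this package's own task helpers (the wrapper name is hypothetical and does not appear in the file):

```go
// Hypothetical helper, sketched for illustration: every successful
// setCookie must be paired with exactly one UdevWait, otherwise the
// System V semaphore backing the cookie is leaked and the limit in
// /proc/sys/kernel/sem is eventually exhausted.
func runWithUdevSync(task *Task) error {
	cookie := new(uint)
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: can not set cookie: %w", err)
	}
	defer UdevWait(cookie) // cleans up the semaphore created by setCookie
	return task.run()
}
```

RemoveDevice and CreatePool above, and ResumeDevice further below, all follow this exact shape.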
-func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) - } - - return nil -} - -// GetDeps is the programmatic example of "dmsetup deps". -// It outputs a list of devices referenced by the live table for the specified device. -func GetDeps(name string) (*Deps, error) { - task, err := TaskCreateNamed(deviceDeps, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getDeps() -} - -// GetInfo is the programmatic example of "dmsetup info". -// It outputs some brief information about the device. -func GetInfo(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfo() -} - -// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. -// It outputs some brief information about the device. -func GetInfoWithDeferred(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfoWithDeferred() -} - -// GetDriverVersion is the programmatic example of "dmsetup version". -// It outputs version information of the driver. -func GetDriverVersion() (string, error) { - task := TaskCreate(deviceVersion) - if task == nil { - return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") - } - if err := task.run(); err != nil { - return "", err - } - return task.getDriverVersion() -} - -// GetStatus is the programmatic example of "dmsetup status". -// It outputs status information for the specified device name. -func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceStatus, name) - if task == nil { - logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// GetTable is the programmatic example for "dmsetup table". -// It outputs the current table for the specified device name. 
-func GetTable(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceTable, name) - if task == nil { - logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// SetTransactionID sets a transaction id for the specified device name. -func SetTransactionID(poolName string, oldID uint64, newID uint64) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) - } - return nil -} - -// SuspendDevice is the programmatic example of "dmsetup suspend". -// It suspends the specified device. -func SuspendDevice(name string) error { - task, err := TaskCreateNamed(deviceSuspend, name) - if task == nil { - return err - } - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) - } - return nil -} - -// ResumeDevice is the programmatic example of "dmsetup resume". -// It un-suspends the specified device. -func ResumeDevice(name string) error { - task, err := TaskCreateNamed(deviceResume, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceResume %s", err) - } - - return nil -} - -// CreateDevice creates a device with the specified poolName with the specified device id. -func CreateDevice(poolName string, deviceID int) error { - logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. - if dmSawExist { - return ErrDeviceIDExists - } - - return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) - - } - return nil -} - -// DeleteDevice deletes a device with the specified poolName with the specified device id. 
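DeleteDevice below follows the same deviceTargetMsg pattern as SetTransactionID and CreateDevice above. Factored out purely for illustration (this helper does not exist in the file), the shared shape is:

```go
// Hypothetical consolidation of the repeated deviceTargetMsg pattern:
// open a message task against the pool, aim it at sector 0, attach the
// textual thin-pool command, and run it.
func sendPoolMessage(poolName, message string) error {
	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
	if task == nil {
		return err
	}
	if err := task.setSector(0); err != nil {
		return fmt.Errorf("devicemapper: can't set sector: %w", err)
	}
	if err := task.setMessage(message); err != nil {
		return fmt.Errorf("devicemapper: can't set message: %w", err)
	}
	return task.run()
}
```

A caller would then send, for example, `fmt.Sprintf("create_thin %d", deviceID)` or `fmt.Sprintf("delete %d", deviceID)`, which is exactly what the functions around here do inline.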
-func DeleteDevice(poolName string, deviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - if err := task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) - } - return nil -} - -// ActivateDevice activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { - return activateDevice(poolName, name, deviceID, size, "") -} - -// ActivateDeviceWithExternal activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { - return activateDevice(poolName, name, deviceID, size, external) -} - -func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { - task, err := TaskCreateNamed(deviceCreate, name) - if task == nil { - return err - } - - var params string - if len(external) > 0 { - params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) - } else { - params = fmt.Sprintf("%s %d", poolName, deviceID) - } - if err := task.addTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - if err := task.setAddNode(addNodeOnCreate); err != nil { - return fmt.Errorf("devicemapper: Can't add node %s", err) - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) - } - - return nil -} - -// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. -func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
- if dmSawExist { - return ErrDeviceIDExists - } - return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) - } - - return nil -} - -// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, -func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { - devinfo, _ := GetInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := SuspendDevice(baseName); err != nil { - return err - } - } - - if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { - if doSuspend { - if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2) - } - } - return err - } - - if doSuspend { - if err := ResumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go deleted file mode 100644 index 6cfef0a5bf..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go +++ /dev/null @@ -1,123 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -import "C" - -import ( - "fmt" - "strings" - - "github.com/sirupsen/logrus" -) - -// DevmapperLogger defines methods required to register as a callback for -// logging events received from devicemapper. Note that devicemapper will send -// *all* logs regardless to callbacks (including debug logs) so it's -// recommended to not spam the console with the outputs. -type DevmapperLogger interface { - // DMLog is the logging callback containing all of the information from - // devicemapper. The interface is identical to the C libdm counterpart. - DMLog(level int, file string, line int, dmError int, message string) -} - -// dmLogger is the current logger in use that is being forwarded our messages. -var dmLogger DevmapperLogger - -// LogInit changes the logging callback called after processing libdm logs for -// error message information. The default logger simply forwards all logs to -// logrus. Calling LogInit(nil) disables the calling of callbacks. -func LogInit(logger DevmapperLogger) { - dmLogger = logger -} - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that -// because we are using callbacks, this function will be called for *every* log -// in libdm (even debug ones because there's no way of setting the verbosity -// level for an external logging callback). -// -//export StorageDevmapperLogCallback -func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { - msg := C.GoString(message) - - // Track what errno libdm saw, because the library only gives us 0 or 1. - if level < LogLevelDebug { - if strings.Contains(msg, "busy") { - dmSawBusy = true - } - - if strings.Contains(msg, "File exists") { - dmSawExist = true - } - - if strings.Contains(msg, "No such device or address") { - dmSawEnxio = true - } - } - - if dmLogger != nil { - dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) - } -} - -// DefaultLogger is the default logger used by pkg/devicemapper. 
It forwards -// all logs that are of higher or equal priority to the given level to the -// corresponding logrus level. -type DefaultLogger struct { - // Level corresponds to the highest libdm level that will be forwarded to - // logrus. In order to change this, register a new DefaultLogger. - Level int -} - -// DMLog is the logging callback containing all of the information from -// devicemapper. The interface is identical to the C libdm counterpart. -func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { - if level <= l.Level { - // Forward the log to the correct logrus level, if allowed by dmLogLevel. - logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - switch level { - case LogLevelFatal, LogLevelErr: - logrus.Error(logMsg) - case LogLevelWarn: - logrus.Warn(logMsg) - case LogLevelNotice, LogLevelInfo: - logrus.Info(logMsg) - case LogLevelDebug: - logrus.Debug(logMsg) - default: - // Don't drop any "unknown" levels. - logrus.Info(logMsg) - } - } -} - -// registerLogCallback registers our own logging callback function for libdm -// (which is StorageDevmapperLogCallback). -// -// Because libdm only gives us {0,1} error codes we need to parse the logs -// produced by libdm (to set dmSawBusy and so on). Note that by registering a -// callback using StorageDevmapperLogCallback, libdm will no longer output logs to -// stderr so we have to log everything ourselves. None of this handling is -// optional because we depend on log callbacks to parse the logs, and if we -// don't forward the log information we'll be in a lot of trouble when -// debugging things. -func registerLogCallback() { - LogWithErrnoInit() -} - -func init() { - // Use the default logger by default. We only allow LogLevelFatal by - // default, because internally we mask a lot of libdm errors by retrying - // and similar tricks. Also, libdm is very chatty and we don't want to - // worry users for no reason. - dmLogger = DefaultLogger{ - Level: LogLevelFatal, - } - - // Register as early as possible so we don't miss anything. - registerLogCallback() -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go deleted file mode 100644 index 9aef4c2fb9..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ /dev/null @@ -1,252 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -/* -#define _GNU_SOURCE -#include -#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? - -// FIXME: Can't we find a way to do the logging in pure Go? -extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); - -static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) -{ - char *buffer = NULL; - va_list ap; - int ret; - - va_start(ap, f); - ret = vasprintf(&buffer, f, ap); - va_end(ap); - if (ret < 0) { - // memory allocation failed -- should never happen? - return; - } - - StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); - free(buffer); -} - -static void log_with_errno_init() -{ - dm_log_with_errno_init(log_cb); -} -*/ -import "C" - -import ( - "reflect" - "unsafe" -) - -type ( - cdmTask C.struct_dm_task -) - -// IOCTL consts -const ( - BlkGetSize64 = C.BLKGETSIZE64 - BlkDiscard = C.BLKDISCARD -) - -// Devicemapper cookie flags. 
-const ( - DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG - DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG - DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG - DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK -) - -// DeviceMapper mapped functions. -var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetDeps = dmTaskGetDepsFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskGetDriverVersion = dmTaskGetDriverVersionFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - DmUdevSetSyncSupport = dmUdevSetSyncSupportFct - DmUdevGetSyncSupport = dmUdevGetSyncSupportFct - DmCookieSupported = dmCookieSupportedFct - LogWithErrnoInit = logWithErrnoInitFct - DmTaskDeferredRemove = dmTaskDeferredRemoveFct - DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *cdmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *cdmTask { - return (*cdmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *cdmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *cdmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *cdmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *cdmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string, -) int { - Cttype := C.CString(ttype) - defer free(Cttype) - - Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetDepsFct(task *cdmTask) *Deps { - Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) - if Cdeps == nil { - return nil - } - - // golang issue: https://github.com/golang/go/issues/11925 - var devices []C.uint64_t - devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices)) - devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))) - devicesHdr.Len = int(Cdeps.count) - devicesHdr.Cap = int(Cdeps.count) - - deps := &Deps{ - Count: uint32(Cdeps.count), - Filler: uint32(Cdeps.filler), - } - for _, 
device := range devices { - deps.Device = append(deps.Device, uint64(device)) - } - return deps -} - -func dmTaskGetInfoFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmTaskGetDriverVersionFct(task *cdmTask) string { - buffer := C.malloc(128) - defer C.free(buffer) - res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) - if res == 0 { - return "" - } - return C.GoString((*C.char)(buffer)) -} - -func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) - return nextp -} - -func dmUdevSetSyncSupportFct(syncWithUdev int) { - (C.dm_udev_set_sync_support(C.int(syncWithUdev))) -} - -func dmUdevGetSyncSupportFct() int { - return int(C.dm_udev_get_sync_support()) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmCookieSupportedFct() int { - return int(C.dm_cookie_supported()) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go deleted file mode 100644 index 071f7f35b1..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build linux && cgo && !libdm_no_deferred_remove -// +build linux,cgo,!libdm_no_deferred_remove - -package devicemapper - -// #include -import "C" - -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build -const LibraryDeferredRemovalSupport = true - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - info.DeferredRemove = 
int(Cinfo.deferred_remove) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go deleted file mode 100644 index 93dcc32219..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build linux && cgo && !static_build -// +build linux,cgo,!static_build - -package devicemapper - -// #cgo pkg-config: devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go deleted file mode 100644 index 91906f2efe..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build linux && cgo && libdm_no_deferred_remove -// +build linux,cgo,libdm_no_deferred_remove - -package devicemapper - -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build -const LibraryDeferredRemovalSupport = false - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - // Error. Nobody should be calling it. - return -1 -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go deleted file mode 100644 index 68ea48fe57..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build linux && cgo && static_build -// +build linux,cgo,static_build - -package devicemapper - -// #cgo pkg-config: --static devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go deleted file mode 100644 index 90ffe2c3fd..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go deleted file mode 100644 index cee5e54549..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/log.go +++ /dev/null @@ -1,11 +0,0 @@ -package devicemapper - -// definitions from lvm2 lib/log/log.h -const ( - LogLevelFatal = 2 + iota // _LOG_FATAL - LogLevelErr // _LOG_ERR - LogLevelWarn // _LOG_WARN - LogLevelNotice // _LOG_NOTICE - LogLevelInfo // _LOG_INFO - LogLevelDebug // _LOG_DEBUG -) diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go deleted file mode 100644 
index e883d25f51..0000000000
--- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build linux
-// +build linux
-
-package dmesg
-
-import (
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// Dmesg returns last messages from the kernel log, up to size bytes
-func Dmesg(size int) []byte {
-	t := uintptr(3) // SYSLOG_ACTION_READ_ALL
-	b := make([]byte, size)
-	amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
-	if err != 0 {
-		return []byte{}
-	}
-	return b[:amt]
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
index ed8cca2c62..b19b4c5908 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
@@ -1,35 +1,21 @@
-//go:build linux || freebsd || solaris || openbsd
-// +build linux freebsd solaris openbsd
-
 // Package kernel provides helper function to get, parse and compare kernel
 // versions for different platforms.
 package kernel
 
 import (
-	"bytes"
-
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 // GetKernelVersion gets the current kernel version.
 func GetKernelVersion() (*VersionInfo, error) {
-	uts, err := uname()
-	if err != nil {
-		return nil, err
-	}
+	uts := &unix.Utsname{}
 
-	release := make([]byte, len(uts.Release))
-
-	i := 0
-	for _, c := range uts.Release {
-		release[i] = byte(c)
-		i++
+	if err := unix.Uname(uts); err != nil {
+		return nil, err
 	}
 
-	// Remove the \x00 from the release for Atoi to parse correctly
-	release = release[:bytes.IndexByte(release, 0)]
-
-	return ParseRelease(string(release))
+	return ParseRelease(unix.ByteSliceToString(uts.Release[:]))
 }
 
 // CheckKernelVersion checks if current kernel is newer than (or equal to)
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
deleted file mode 100644
index e913fad001..0000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package kernel
-
-import "golang.org/x/sys/unix"
-
-// Utsname represents the system name structure.
-// It is passthrough for unix.Utsname in order to make it portable with
-// other platforms where it is not available.
-type Utsname unix.Utsname
-
-func uname() (*unix.Utsname, error) {
-	uts := &unix.Utsname{}
-
-	if err := unix.Uname(uts); err != nil {
-		return nil, err
-	}
-	return uts, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
deleted file mode 100644
index e913fad001..0000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package kernel
-
-import "golang.org/x/sys/unix"
-
-// Utsname represents the system name structure.
-// It is passthrough for unix.Utsname in order to make it portable with
-// other platforms where it is not available.
-type Utsname unix.Utsname - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index 49370bd3dd..0000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 12671db513..0000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build openbsd -// +build openbsd - -package kernel - -import ( - "fmt" - "runtime" -) - -// A stub called by kernel_unix.go . -func uname() (*Utsname, error) { - return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS) -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go deleted file mode 100644 index f515500c92..0000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux && !solaris && !freebsd -// +build !linux,!solaris,!freebsd - -package kernel - -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 924e8f13a5..0f8d1f0249 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -173,79 +173,3 @@ mountopt = "nodev" # "force_mask" permissions. # # force_mask = "" - -[storage.options.thinpool] -# Storage Options for thinpool - -# autoextend_percent determines the amount by which pool needs to be -# grown. This is specified in terms of % of pool size. So a value of 20 means -# that when threshold is hit, pool will be grown by 20% of existing -# pool size. -# autoextend_percent = "20" - -# autoextend_threshold determines the pool extension threshold in terms -# of percentage of pool size. For example, if threshold is 60, that means when -# pool is 60% full, threshold has been hit. -# autoextend_threshold = "80" - -# basesize specifies the size to use when creating the base device, which -# limits the size of images and containers. -# basesize = "10G" - -# blocksize specifies a custom blocksize to use for the thin pool. -# blocksize="64k" - -# directlvm_device specifies a custom block storage device to use for the -# thin pool. Required if you setup devicemapper. -# directlvm_device = "" - -# directlvm_device_force wipes device even if device already has a filesystem. -# directlvm_device_force = "True" - -# fs specifies the filesystem type to use for the base device. -# fs="xfs" - -# log_level sets the log level of devicemapper. 
-# 0: LogLevelSuppress 0 (Default) -# 2: LogLevelFatal -# 3: LogLevelErr -# 4: LogLevelWarn -# 5: LogLevelNotice -# 6: LogLevelInfo -# 7: LogLevelDebug -# log_level = "7" - -# min_free_space specifies the min free space percent in a thin pool require for -# new device creation to succeed. Valid values are from 0% - 99%. -# Value 0% disables -# min_free_space = "10%" - -# mkfsarg specifies extra mkfs arguments to be used when creating the base -# device. -# mkfsarg = "" - -# metadata_size is used to set the `pvcreate --metadatasize` options when -# creating thin devices. Default is 128k -# metadata_size = "" - -# Size is used to set a maximum size of the container image. -# size = "" - -# use_deferred_removal marks devicemapper block device for deferred removal. -# If the thinpool is in use when the driver attempts to remove it, the driver -# tells the kernel to remove it as soon as possible. Note this does not free -# up the disk space, use deferred deletion to fully remove the thinpool. -# use_deferred_removal = "True" - -# use_deferred_deletion marks thinpool device for deferred deletion. -# If the device is busy when the driver attempts to delete it, the driver -# will attempt to delete device every 30 seconds until successful. -# If the program using the driver exits, the driver will continue attempting -# to cleanup the next time the driver is used. Deferred deletion permanently -# deletes the device and all data stored in device will be lost. -# use_deferred_deletion = "True" - -# xfs_nospace_max_retries specifies the maximum number of retries XFS should -# attempt to complete IO when ENOSPC (no space) error is returned by -# underlying storage device. -# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd index 03bbe24619..43278a1fc5 100644 --- a/vendor/github.com/containers/storage/storage.conf-freebsd +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -134,79 +134,3 @@ mountopt = "nodev" # "force_mask" permissions. # # force_mask = "" - -[storage.options.thinpool] -# Storage Options for thinpool - -# autoextend_percent determines the amount by which pool needs to be -# grown. This is specified in terms of % of pool size. So a value of 20 means -# that when threshold is hit, pool will be grown by 20% of existing -# pool size. -# autoextend_percent = "20" - -# autoextend_threshold determines the pool extension threshold in terms -# of percentage of pool size. For example, if threshold is 60, that means when -# pool is 60% full, threshold has been hit. -# autoextend_threshold = "80" - -# basesize specifies the size to use when creating the base device, which -# limits the size of images and containers. -# basesize = "10G" - -# blocksize specifies a custom blocksize to use for the thin pool. -# blocksize="64k" - -# directlvm_device specifies a custom block storage device to use for the -# thin pool. Required if you setup devicemapper. -# directlvm_device = "" - -# directlvm_device_force wipes device even if device already has a filesystem. -# directlvm_device_force = "True" - -# fs specifies the filesystem type to use for the base device. -# fs="xfs" - -# log_level sets the log level of devicemapper. 
-# 0: LogLevelSuppress 0 (Default) -# 2: LogLevelFatal -# 3: LogLevelErr -# 4: LogLevelWarn -# 5: LogLevelNotice -# 6: LogLevelInfo -# 7: LogLevelDebug -# log_level = "7" - -# min_free_space specifies the min free space percent in a thin pool require for -# new device creation to succeed. Valid values are from 0% - 99%. -# Value 0% disables -# min_free_space = "10%" - -# mkfsarg specifies extra mkfs arguments to be used when creating the base -# device. -# mkfsarg = "" - -# metadata_size is used to set the `pvcreate --metadatasize` options when -# creating thin devices. Default is 128k -# metadata_size = "" - -# Size is used to set a maximum size of the container image. -# size = "" - -# use_deferred_removal marks devicemapper block device for deferred removal. -# If the thinpool is in use when the driver attempts to remove it, the driver -# tells the kernel to remove it as soon as possible. Note this does not free -# up the disk space, use deferred deletion to fully remove the thinpool. -# use_deferred_removal = "True" - -# use_deferred_deletion marks thinpool device for deferred deletion. -# If the device is busy when the driver attempts to delete it, the driver -# will attempt to delete device every 30 seconds until successful. -# If the program using the driver exits, the driver will continue attempting -# to cleanup the next time the driver is used. Deferred deletion permanently -# deletes the device and all data stored in device will be lost. -# use_deferred_deletion = "True" - -# xfs_nospace_max_retries specifies the maximum number of retries XFS should -# attempt to complete IO when ENOSPC (no space) error is returned by -# underlying storage device. -# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf index 87b0c9bb16..c42d33fb9e 100644 --- a/vendor/github.com/containers/storage/types/storage_test.conf +++ b/vendor/github.com/containers/storage/types/storage_test.conf @@ -39,7 +39,3 @@ remap-gids = "0:1500000000:60000" # mountopt specifies comma separated list of extra mount options mountopt = "nodev" - - -[storage.options.thinpool] -# Storage Options for thinpool diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go index b7bd09275d..f42d37d481 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go @@ -1,5 +1,7 @@ package oidc +import jose "github.com/go-jose/go-jose/v4" + // JOSE asymmetric signing algorithm values as defined by RFC 7518 // // see: https://tools.ietf.org/html/rfc7518#section-3.1 @@ -15,3 +17,16 @@ const ( PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 EdDSA = "EdDSA" // Ed25519 using SHA-512 ) + +var allAlgs = []jose.SignatureAlgorithm{ + jose.RS256, + jose.RS384, + jose.RS512, + jose.ES256, + jose.ES384, + jose.ES512, + jose.PS256, + jose.PS384, + jose.PS512, + jose.EdDSA, +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go index b1e3f7e3ff..9a70c14328 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -13,7 +13,7 @@ import ( "sync" "time" - jose "github.com/go-jose/go-jose/v3" + jose "github.com/go-jose/go-jose/v4" ) // StaticKeySet is a verifier that validates JWT against a static set of public keys. 
@@ -25,7 +25,9 @@ type StaticKeySet struct {
 
 // VerifySignature compares the signature against a static set of public keys.
 func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
-	jws, err := jose.ParseSigned(jwt)
+	// Algorithms are already checked by Verifier, so this parse method accepts
+	// any algorithm.
+	jws, err := jose.ParseSigned(jwt, allAlgs)
 	if err != nil {
 		return nil, fmt.Errorf("parsing jwt: %v", err)
 	}
@@ -127,8 +129,13 @@ var parsedJWTKey contextKey
 func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
 	jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
 	if !ok {
+		// The algorithm values are already enforced by the Validator, which also sets
+		// the context value above to pre-parsed signature.
+		//
+		// Practically, this codepath isn't called in normal use of this package, but
+		// if it is, the algorithms have already been checked.
 		var err error
-		jws, err = jose.ParseSigned(jwt)
+		jws, err = jose.ParseSigned(jwt, allAlgs)
 		if err != nil {
 			return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
 		}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
index b7db3c7342..17419f3883 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
@@ -79,7 +79,7 @@ func getClient(ctx context.Context) *http.Client {
 //	provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
 //
 // This is insecure because validating the correct issuer is critical for multi-tenant
-// proivders. Any overrides here MUST be carefully reviewed.
+// providers. Any overrides here MUST be carefully reviewed.
 func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
 	return context.WithValue(ctx, issuerURLKey, issuerURL)
 }
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
index 0bca49a899..0ac58d2995 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
@@ -12,7 +12,7 @@ import (
 	"strings"
 	"time"
 
-	jose "github.com/go-jose/go-jose/v3"
+	jose "github.com/go-jose/go-jose/v4"
 	"golang.org/x/oauth2"
 )
 
@@ -310,7 +310,16 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
 		return t, nil
 	}
 
-	jws, err := jose.ParseSigned(rawIDToken)
+	var supportedSigAlgs []jose.SignatureAlgorithm
+	for _, alg := range v.config.SupportedSigningAlgs {
+		supportedSigAlgs = append(supportedSigAlgs, jose.SignatureAlgorithm(alg))
+	}
+	if len(supportedSigAlgs) == 0 {
+		// If no algorithms were specified by both the config and discovery, default
+		// to the one mandatory algorithm "RS256".
+ supportedSigAlgs = []jose.SignatureAlgorithm{jose.RS256} + } + jws, err := jose.ParseSigned(rawIDToken, supportedSigAlgs) if err != nil { return nil, fmt.Errorf("oidc: malformed jwt: %v", err) } @@ -322,17 +331,7 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok default: return nil, fmt.Errorf("oidc: multiple signatures on id token not supported") } - sig := jws.Signatures[0] - supportedSigAlgs := v.config.SupportedSigningAlgs - if len(supportedSigAlgs) == 0 { - supportedSigAlgs = []string{RS256} - } - - if !contains(supportedSigAlgs, sig.Header.Algorithm) { - return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm) - } - t.sigAlgorithm = sig.Header.Algorithm ctx = context.WithValue(ctx, parsedJWTKey, jws) diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore new file mode 100644 index 0000000000..eb29ebaefd --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml new file mode 100644 index 0000000000..2a577a8f95 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml new file mode 100644 index 0000000000..48de631b00 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. 
+ +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md new file mode 100644 index 0000000000..28bdd2fc08 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -0,0 +1,72 @@ +# v4.0.1 + +## Fixed + + - An attacker could send a JWE containing compressed data that used large + amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. + Those functions now return an error if the decompressed data would exceed + 250kB or 10x the compressed size (whichever is larger). Thanks to + Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) + for reporting. + +# v4.0.0 + +This release makes some breaking changes in order to more thoroughly +address the vulnerabilities discussed in [Three New Attacks Against JSON Web +Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot +token". + +## Changed + + - Limit JWT encryption types (exclude password or public key types) (#78) + - Enforce minimum length for HMAC keys (#85) + - jwt: match any audience in a list, rather than requiring all audiences (#81) + - jwt: accept only Compact Serialization (#75) + - jws: Add expected algorithms for signatures (#74) + - Require specifying expected algorithms for ParseEncrypted, + ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, + jwt.ParseSignedAndEncrypted (#69, #74) + - Usually there is a small, known set of appropriate algorithms for a program + to use and it's a mistake to allow unexpected algorithms. For instance the + "billion hash attack" relies in part on programs accepting the PBES2 + encryption algorithm and doing the necessary work even if they weren't + specifically configured to allow PBES2. + - Revert "Strip padding off base64 strings" (#82) + - The specs require base64url encoding without padding. + - Minimum supported Go version is now 1.21 + +## Added + + - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. + - These allow parsing a specific serialization, as opposed to ParseSigned and + ParseEncrypted, which try to automatically detect which serialization was + provided. It's common to require a specific serialization for a specific + protocol - for instance JWT requires Compact serialization. + +[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + +# v3.0.1 + +## Fixed + + - Security issue: an attacker specifying a large "p2c" value can cause + JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large + amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the + disclosure and to Tom Tervoort for originally publishing the category of attack. 
+ https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md new file mode 100644 index 0000000000..b63e1f8fee --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. + +[1]: https://cla-assistant.io/ diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE similarity index 99% rename from vendor/gopkg.in/yaml.v2/LICENSE rename to vendor/github.com/go-jose/go-jose/v4/LICENSE index 8dada3edaf..d645695673 100644 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ b/vendor/github.com/go-jose/go-jose/v4/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md new file mode 100644 index 0000000000..79a7c5ecc8 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -0,0 +1,114 @@ +# Go JOSE + +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) +[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. 
In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 4](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/main),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
+
+    import "github.com/go-jose/go-jose/v4"
+
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still usable but not actively developed anymore.
+
+Version 3, in this repo, is still receiving security fixes but not functionality
+updates.
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption             | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5             | RSA1_5
+ RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
+ AES key wrap               | A128KW, A192KW, A256KW
+ AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC              | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
+ RSASSA-PSS                 | PS256, PS384, PS512
+ HMAC                       | HS256, HS384, HS512
+ ECDSA                      | ES256, ES384, ES512
+ Ed25519                    | EdDSA<sup>2</sup>
+
+2. Only available in version 2 or later of the package
+
+ Content encryption         | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM                    | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951)         | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s)               | Corresponding types
+ :------------------------- | -------------------------------
+ RSA                        | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA                | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup>          | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
+ AES, HMAC                  | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
new file mode 100644
index 0000000000..2f18a75a82
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
+
+## Supported Versions
+| Version | Supported |
+| ------- | ----------|
+| >= v3   | ✓         |
+| v2      | ✗         |
+| v1      | ✗         |
+
+## Reporting a vulnerability
+
+Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow up again with another email.
diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
new file mode 100644
index 0000000000..f8d5774ef5
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
@@ -0,0 +1,595 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+	"crypto"
+	"crypto/aes"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"math/big"
+
+	josecipher "github.com/go-jose/go-jose/v4/cipher"
+	"github.com/go-jose/go-jose/v4/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+	publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+	privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+	publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+	publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+	size      int
+	algID     string
+	publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+	privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+	privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+	// Verify that key management algorithm is supported by this encrypter
+	switch keyAlg {
+	case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+	default:
+		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if publicKey == nil {
+		return recipientKeyInfo{}, errors.New("invalid public key")
+	}
+
+	return recipientKeyInfo{
+		keyAlg: keyAlg,
+		keyEncrypter: &rsaEncrypterVerifier{
+			publicKey: publicKey,
+		},
+	}, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+	// Verify that the signature algorithm is supported by this signer
+	switch sigAlg {
+	case RS256, RS384, RS512, PS256, PS384, PS512:
+	default:
+		return recipientSigInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if privateKey == nil {
+		return recipientSigInfo{}, errors.New("invalid private key")
+	}
+
+	return recipientSigInfo{
+		sigAlg: sigAlg,
+		publicKey: staticPublicKey(&JSONWebKey{
+			Key: privateKey.Public(),
+		}),
+		signer: &rsaDecrypterSigner{
+			privateKey: privateKey,
+		},
+	}, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+	if sigAlg != EdDSA {
+		return recipientSigInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if privateKey == nil {
+		return recipientSigInfo{}, errors.New("invalid private key")
+	}
+	return recipientSigInfo{
+		sigAlg: sigAlg,
+		publicKey: staticPublicKey(&JSONWebKey{
+			Key: privateKey.Public(),
+		}),
+		signer: &edDecrypterSigner{
+			privateKey: privateKey,
+		},
+	}, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+	// Verify that key management algorithm is supported by this encrypter
+	switch keyAlg {
+	case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+	default:
+		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+		return recipientKeyInfo{}, errors.New("invalid public key")
+	}
+
+	return recipientKeyInfo{
+		keyAlg: keyAlg,
+		keyEncrypter: &ecEncrypterVerifier{
+			publicKey: publicKey,
+		},
+	}, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
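+// The key's curve is not validated here; signPayload later rejects keys whose
+// curve size does not match the requested algorithm (e.g. ES256 requires a
+// 256-bit curve).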
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+	// Verify that the signature algorithm is supported by this signer
+	switch sigAlg {
+	case ES256, ES384, ES512:
+	default:
+		return recipientSigInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if privateKey == nil {
+		return recipientSigInfo{}, errors.New("invalid private key")
+	}
+
+	return recipientSigInfo{
+		sigAlg: sigAlg,
+		publicKey: staticPublicKey(&JSONWebKey{
+			Key: privateKey.Public(),
+		}),
+		signer: &ecDecrypterSigner{
+			privateKey: privateKey,
+		},
+	}, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+	encryptedKey, err := ctx.encrypt(cek, alg)
+	if err != nil {
+		return recipientInfo{}, err
+	}
+
+	return recipientInfo{
+		encryptedKey: encryptedKey,
+		header:       &rawHeader{},
+	}, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+	switch alg {
+	case RSA1_5:
+		return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+	case RSA_OAEP:
+		return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+	case RSA_OAEP_256:
+		return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+	}
+
+	return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+	return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+	// Note: The random reader on decrypt operations is only used for blinding,
+	// so stubbing is meaningless (hence the direct use of rand.Reader).
+	switch alg {
+	case RSA1_5:
+		defer func() {
+			// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+			// because of an index out of bounds error, which we want to ignore.
+			// This has been fixed in Go 1.3.1 (released 2014/08/13); the recover()
+			// only exists to prevent crashes with unpatched versions.
+			// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+			// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+			_ = recover()
+		}()
+
+		// Perform some input validation.
+		keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+		if keyBytes != len(jek) {
+			// Input size is incorrect, the encrypted payload should always match
+			// the size of the public modulus (e.g. using a 2048 bit key will
+			// produce 256 bytes of output). Reject this since it's invalid input.
+			return nil, ErrCryptoFailure
+		}
+
+		cek, _, err := generator.genKey()
+		if err != nil {
+			return nil, ErrCryptoFailure
+		}
+
+		// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+		// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+		// the Million Message Attack on Cryptographic Message Syntax". We are
+		// therefore deliberately ignoring errors here.
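+		// On success, the call below overwrites cek with the unwrapped key; on
+		// failure it leaves the randomly generated cek in place, so invalid
+		// inputs are indistinguishable from valid ones.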
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
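+		// The recipient therefore carries no encrypted key; the CEK itself is
+		// produced later by the ecKeyGenerator from the ephemeral key agreement.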
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
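+		// The shared secret is derived with the content encryption algorithm
+		// ("enc") as the KDF's algorithm ID and sized for the content cipher.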
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go new file mode 100644 index 0000000000..af029cec0b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
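+	// Allocate room for the plaintext plus maximum overhead up front, then
+	// slice back to the plaintext length so padding can grow the buffer in
+	// place without reallocating.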
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
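+// It returns the resized buffer (head) along with the region past the
+// original input (tail).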
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go new file mode 100644 index 0000000000..f62c3bdba5 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go new file mode 100644 index 0000000000..093c646740 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with private and public keys that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+	if size > 1<<16 {
+		panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+	}
+
+	// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+	algID := lengthPrefixed([]byte(alg))
+	ptyUInfo := lengthPrefixed(apuData)
+	ptyVInfo := lengthPrefixed(apvData)
+
+	// suppPubInfo is the encoded length of the output size in bits
+	supPubInfo := make([]byte, 4)
+	binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+	if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+		panic("public key not on same curve as private key")
+	}
+
+	z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+	zBytes := z.Bytes()
+
+	// Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+	// the returned byte array. This can lead to a problem where zBytes will be
+	// shorter than expected which breaks the key derivation. Therefore we must pad
+	// to the full length of the expected coordinate here before calling the KDF.
+	octSize := dSize(priv.Curve)
+	if len(zBytes) != octSize {
+		zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+	}
+
+	reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+	key := make([]byte, size)
+
+	// Read on the KDF will never fail
+	_, _ = reader.Read(key)
+
+	return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+	order := curve.Params().P
+	bitLen := order.BitLen()
+	size := bitLen / 8
+	if bitLen%8 != 0 {
+		size++
+	}
+	return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+	out := make([]byte, len(data)+4)
+	binary.BigEndian.PutUint32(out, uint32(len(data)))
+	copy(out[4:], data)
+	return out
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
new file mode 100644
index 0000000000..b9effbca8a
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go new file mode 100644 index 0000000000..aba08424c3 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -0,0 +1,593 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + + "github.com/go-jose/go-jose/v4/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
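+//
+// A minimal usage sketch, with error handling elided (assumes an
+// *rsa.PublicKey named pub and its private counterpart priv):
+//
+//	enc, _ := NewEncrypter(A128GCM, Recipient{Algorithm: RSA_OAEP, Key: pub}, nil)
+//	jwe, _ := enc.Encrypt([]byte("secret payload"))
+//	compact, _ := jwe.CompactSerialize()
+//
+//	// The receiving side must state which algorithms it expects:
+//	parsed, _ := ParseEncrypted(compact, []KeyAlgorithm{RSA_OAEP}, []ContentEncryption{A128GCM})
+//	plaintext, _ := parsed.Decrypt(priv)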
+type Encrypter interface {
+	Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+	EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+	Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+	keySize() int
+	encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+	decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+	keySize() int
+	genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+	encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+	decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+	contentAlg     ContentEncryption
+	compressionAlg CompressionAlgorithm
+	cipher         contentCipher
+	recipients     []recipientKeyInfo
+	keyGenerator   keyGenerator
+	extraHeaders   map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+	keyID        string
+	keyAlg       KeyAlgorithm
+	keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+	Compression CompressionAlgorithm
+
+	// Optional map of name/value pairs to be inserted into the protected
+	// header of a JWE object. Some specifications which make use of
+	// JWE require additional values here.
+	//
+	// Values will be serialized by [json.Marshal] and must be valid inputs to
+	// that function.
+	//
+	// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+	ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary, and returns the updated EncrypterOptions.
+//
+// The v parameter will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+	if eo.ExtraHeaders == nil {
+		eo.ExtraHeaders = map[HeaderKey]interface{}{}
+	}
+	eo.ExtraHeaders[k] = v
+	return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+	return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+	return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond to the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+	Algorithm KeyAlgorithm
+	// Key must have one of these types:
+	//  - ed25519.PublicKey
+	//  - *ecdsa.PublicKey
+	//  - *rsa.PublicKey
+	//  - *JSONWebKey
+	//  - JSONWebKey
+	//  - []byte (a symmetric key)
+	//  - Any type that satisfies the OpaqueKeyEncrypter interface
+	//
+	// The type of Key must match the value of Algorithm.
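+	//
+	// For example, the RSA_OAEP and RSA1_5 algorithms require an
+	// *rsa.PublicKey, while the ECDH-ES algorithms require an
+	// *ecdsa.PublicKey.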
+ Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + keyBytes, ok := rawKey.([]byte) + if !ok { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(keyBytes) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: keyBytes, + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: keyDSA, + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch 
recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType + } +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
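+		// (The compact serialization in particular can only represent a
+		// single protected header.)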
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient +// decryption use DecryptMulti instead. +// +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } + } + + return plaintext, nil +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. 
It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. +// +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. +func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v4/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go new file mode 100644 index 0000000000..0ad40ca085 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/doc.go @@ -0,0 +1,25 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +Package jose aims to provide an implementation of the JavaScript Object Signing +and Encryption set of standards.
It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go new file mode 100644 index 0000000000..4f6e0d4a5c --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go @@ -0,0 +1,228 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v4/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// deflate compresses the input. 
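+// +// It writes through compress/flate at compression level 1 (flate.BestSpeed); +// writes to the in-memory buffer cannot fail, so only the writer's Close error +// is surfaced.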
+func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// inflate decompresses the input. +// +// Errors if the decompressed data would be >250kB or >10x the size of the +// compressed data, whichever is larger. +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + maxCompressedSize := max(250_000, 10*int64(len(input))) + + limit := maxCompressedSize + 1 + n, err := io.CopyN(output, reader, limit) + if err != nil && err != io.EOF { + return nil, err + } + if n == limit { + return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. +type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64.RawURLEncoding.DecodeString(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +func base64EncodeLen(sl []byte) int { + return base64.RawURLEncoding.EncodedLen(len(sl)) +} + +func base64JoinWithDots(inputs ...[]byte) string { + if len(inputs) == 0 { + return "" + } + + // Count of dots. + totalCount := len(inputs) - 1 + + for _, input := range inputs { + totalCount += base64EncodeLen(input) + } + + out := make([]byte, totalCount) + startEncode := 0 + for i, input := range inputs { + base64.RawURLEncoding.Encode(out[startEncode:], input) + + if i == len(inputs)-1 { + continue + } + + startEncode += base64EncodeLen(input) + out[startEncode] = '.' + startEncode++ + } + + return string(out) +} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md new file mode 100644 index 0000000000..86de5e5581 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go new file mode 100644 index 0000000000..50634dd847 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/decode.go @@ -0,0 +1,1216 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. 
Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// requiring an exact, case-sensitive match (unlike the standard library, this +// fork does not fall back to case-insensitive matching; see this package's README). +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use. If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type.
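+// +// It is recorded via saveError, so decoding continues as far as it can and +// Unmarshal reports the earliest such error as its return value.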
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. 
+ return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as an int64 + // if the value is an integer, otherwise as a float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind.
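+ // (Clear the redo flag and reset to the begin-value state so the + // synthetic empty-string literal fed below is accepted.)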
+ d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. 
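+ // (An interface with methods cannot hold the []interface{} built by + // arrayInterface, so fall through to the type-error path.)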
+ fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. 
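+ // (Matching is exact and case-sensitive in this fork; see the + // bytes.Equal comparison below and this package's README.)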
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. This is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler.
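+ // (A zero-length item is rejected up front; it can only arise from a + // ",string"-tagged field wrapping an empty string.)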
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go new file mode 100644 index 0000000000..98de68ce1e --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. 
The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. 
+// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go index 019c854295..b96b01e7f3 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/redoc.go +++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -10,67 +10,58 @@ import ( // RedocOpts configures the Redoc middlewares type RedocOpts struct { - // BasePath for the UI path, defaults to: / + // BasePath for the UI, defaults to: / BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". Path string - // SpecURL the url to find the spec for + + // SpecURL is the URL of the spec document. 
+ // + // Defaults to: /swagger.json SpecURL string - // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js - RedocURL string + // Title for the documentation site, default to: API documentation Title string + + // Template specifies a custom template to serve the UI + Template string + + // RedocURL points to the js that generates the redoc site. + // + // Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js + RedocURL string } // EnsureDefaults in case some options are missing func (r *RedocOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } + common := toCommonUIOptions(r) + common.EnsureDefaults() + fromCommonToAnyOptions(common, r) + + // redoc-specifics if r.RedocURL == "" { r.RedocURL = redocLatest } - if r.Title == "" { - r.Title = "API documentation" + if r.Template == "" { + r.Template = redocTemplate } } // Redoc creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. // +// This allows for altering the spec before starting the http listener. func Redoc(opts RedocOpts, next http.Handler) http.Handler { opts.EnsureDefaults() pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("redoc").Parse(redocTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } + tmpl := template.Must(template.New("redoc").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) + return serveUI(pth, assets.Bytes(), next) } const ( diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go index 760c37861d..82e1436652 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/request.go +++ b/vendor/github.com/go-openapi/runtime/middleware/request.go @@ -19,10 +19,10 @@ import ( "reflect" "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" ) // UntypedRequestBinder binds and validates the data from a http request @@ -31,6 +31,7 @@ type UntypedRequestBinder struct { Parameters map[string]spec.Parameter Formats strfmt.Registry paramBinders map[string]*untypedParamBinder + debugLogf func(string, ...any) // a logging function to debug context and all components using it } // NewUntypedRequestBinder creates a new binder for reading a request. 
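These request.go and router.go hunks thread an injectable debug logger through the untyped request binder and the default router. As a minimal sketch of how downstream code might wire up the new option once this lands, assuming a *loads.Document (doc) and a middleware.RoutableAPI implementation (api) are already in hand (both names are illustrative, not part of the patch):

	import (
		"log"

		"github.com/go-openapi/loads"
		"github.com/go-openapi/runtime/middleware"
	)

	func buildRouter(doc *loads.Document, api middleware.RoutableAPI) middleware.Router {
		// Route the router's debug output to the standard logger.
		// Note: the logger.Logger-based option (WithDefaultRouterLogger) is
		// documented as active only in DEBUG mode, while a raw function
		// installed via WithDefaultRouterLoggerFunc is stored and called directly.
		return middleware.DefaultRouter(doc, api,
			middleware.WithDefaultRouterLoggerFunc(func(format string, args ...any) {
				log.Printf(format, args...)
			}))
	}

SetLogger methods are also added to UntypedRequestBinder and to the default router implementation for injecting a logger after construction.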
@@ -44,6 +45,7 @@ func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Sw paramBinders: binders, Spec: spec, Formats: formats, + debugLogf: debugLogfFunc(nil), } } @@ -52,10 +54,10 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara val := reflect.Indirect(reflect.ValueOf(data)) isMap := val.Kind() == reflect.Map var result []error - debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) + o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) for fieldName, param := range o.Parameters { binder := o.paramBinders[fieldName] - debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) + o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) var target reflect.Value if !isMap { binder.Name = fieldName @@ -65,7 +67,7 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara if isMap { tpe := binder.Type() if tpe == nil { - if param.Schema.Type.Contains("array") { + if param.Schema.Type.Contains(typeArray) { tpe = reflect.TypeOf([]interface{}{}) } else { tpe = reflect.TypeOf(map[string]interface{}{}) @@ -102,3 +104,14 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara return nil } + +// SetLogger allows for injecting a logger to catch debug entries. +// +// The logger is enabled in DEBUG mode only. +func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) { + o.debugLogf = debugLogfFunc(lg) +} + +func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) { + o.debugLogf = fn +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go index 5052031c8d..3a6aee90e5 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/router.go +++ b/vendor/github.com/go-openapi/runtime/middleware/router.go @@ -17,10 +17,12 @@ package middleware import ( "fmt" "net/http" + "net/url" fpath "path" "regexp" "strings" + "github.com/go-openapi/runtime/logger" "github.com/go-openapi/runtime/security" "github.com/go-openapi/swag" @@ -67,10 +69,10 @@ func (r RouteParams) GetOK(name string) ([]string, bool, bool) { return nil, false, false } -// NewRouter creates a new context aware router middleware +// NewRouter creates a new context-aware router middleware func NewRouter(ctx *Context, next http.Handler) http.Handler { if ctx.router == nil { - ctx.router = DefaultRouter(ctx.spec, ctx.api) + ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf)) } return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { @@ -103,41 +105,75 @@ type RoutableAPI interface { DefaultConsumes() string } -// Router represents a swagger aware router +// Router represents a swagger-aware router type Router interface { Lookup(method, path string) (*MatchedRoute, bool) OtherMethods(method, path string) []string } type defaultRouteBuilder struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - records map[string][]denco.Record + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + records map[string][]denco.Record + debugLogf func(string, ...any) // a logging function to debug context and all components using it } type defaultRouter struct { - spec *loads.Document - routers map[string]*denco.Router + spec *loads.Document + routers map[string]*denco.Router + debugLogf 
func(string, ...any) // a logging function to debug context and all components using it
 }
 
-func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder {
+func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder {
+	var o defaultRouterOpts
+	for _, apply := range opts {
+		apply(&o)
+	}
+	if o.debugLogf == nil {
+		o.debugLogf = debugLogfFunc(nil) // defaults to standard logger
+	}
+
 	return &defaultRouteBuilder{
-		spec:     spec,
-		analyzer: analysis.New(spec.Spec()),
-		api:      api,
-		records:  make(map[string][]denco.Record),
+		spec:      spec,
+		analyzer:  analysis.New(spec.Spec()),
+		api:       api,
+		records:   make(map[string][]denco.Record),
+		debugLogf: o.debugLogf,
 	}
 }
 
-// DefaultRouter creates a default implemenation of the router
-func DefaultRouter(spec *loads.Document, api RoutableAPI) Router {
-	builder := newDefaultRouteBuilder(spec, api)
+// DefaultRouterOpt allows injecting optional behavior into the default router.
+type DefaultRouterOpt func(*defaultRouterOpts)
+
+type defaultRouterOpts struct {
+	debugLogf func(string, ...any)
+}
+
+// WithDefaultRouterLogger sets the debug logger for the default router.
+//
+// This is enabled only in DEBUG mode.
+func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt {
+	return func(o *defaultRouterOpts) {
+		o.debugLogf = debugLogfFunc(lg)
+	}
+}
+
+// WithDefaultRouterLoggerFunc sets a logging debug method for the default router.
+func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt {
+	return func(o *defaultRouterOpts) {
+		o.debugLogf = fn
+	}
+}
+
+// DefaultRouter creates a default implementation of the router
+func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router {
+	builder := newDefaultRouteBuilder(spec, api, opts...)
if spec != nil { for method, paths := range builder.analyzer.Operations() { for path, operation := range paths { fp := fpath.Join(spec.BasePath(), path) - debugLog("adding route %s %s %q", method, fp, operation.ID) + builder.debugLogf("adding route %s %s %q", method, fp, operation.ID) builder.AddRoute(method, fp, operation) } } @@ -319,24 +355,24 @@ func (m *MatchedRoute) NeedsAuth() bool { func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { mth := strings.ToUpper(method) - debugLog("looking up route for %s %s", method, path) + d.debugLogf("looking up route for %s %s", method, path) if Debug { if len(d.routers) == 0 { - debugLog("there are no known routers") + d.debugLogf("there are no known routers") } for meth := range d.routers { - debugLog("got a router for %s", meth) + d.debugLogf("got a router for %s", meth) } } if router, ok := d.routers[mth]; ok { if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { if entry, ok := m.(*routeEntry); ok { - debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) var params RouteParams for _, p := range rp { - v, err := pathUnescape(p.Value) + v, err := url.PathUnescape(p.Value) if err != nil { - debugLog("failed to escape %q: %v", p.Value, err) + d.debugLogf("failed to escape %q: %v", p.Value, err) v = p.Value } // a workaround to handle fragment/composing parameters until they are supported in denco router @@ -356,10 +392,10 @@ func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { return &MatchedRoute{routeEntry: *entry, Params: params}, true } } else { - debugLog("couldn't find a route by path for %s %s", method, path) + d.debugLogf("couldn't find a route by path for %s %s", method, path) } } else { - debugLog("couldn't find a route by method for %s %s", method, path) + d.debugLogf("couldn't find a route by method for %s %s", method, path) } return nil, false } @@ -378,6 +414,10 @@ func (d *defaultRouter) OtherMethods(method, path string) []string { return methods } +func (d *defaultRouter) SetLogger(lg logger.Logger) { + d.debugLogf = debugLogfFunc(lg) +} + // convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) @@ -413,7 +453,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper bp = bp[:len(bp)-1] } - debugLog("operation: %#v", *operation) + d.debugLogf("operation: %#v", *operation) if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { consumes := d.analyzer.ConsumesFor(operation) produces := d.analyzer.ProducesFor(operation) @@ -428,6 +468,8 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper produces = append(produces, defProduces) } + requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()) + requestBinder.setDebugLogf(d.debugLogf) record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ BasePath: bp, PathPattern: path, @@ -439,7 +481,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper Producers: d.api.ProducersFor(normalizeOffers(produces)), Parameters: parameters, Formats: d.api.Formats(), - Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), + Binder: requestBinder, Authenticators: d.buildAuthenticators(operation), 
Authorizer: d.api.Authorizer(), }) @@ -449,11 +491,11 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { requirements := d.analyzer.SecurityRequirementsFor(operation) - var auths []RouteAuthenticator + auths := make([]RouteAuthenticator, 0, len(requirements)) for _, reqs := range requirements { - var schemes []string + schemes := make([]string, 0, len(reqs)) scopes := make(map[string][]string, len(reqs)) - var scopeSlices [][]string + scopeSlices := make([][]string, 0, len(reqs)) for _, req := range reqs { schemes = append(schemes, req.Name) scopes[req.Name] = req.Scopes @@ -482,7 +524,8 @@ func (d *defaultRouteBuilder) Build() *defaultRouter { routers[method] = router } return &defaultRouter{ - spec: d.spec, - routers: routers, + spec: d.spec, + routers: routers, + debugLogf: d.debugLogf, } } diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go index f029142980..87e17e3424 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/spec.go +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -19,30 +19,84 @@ import ( "path" ) -// Spec creates a middleware to serve a swagger spec. +const ( + contentTypeHeader = "Content-Type" + applicationJSON = "application/json" +) + +// SpecOption can be applied to the Spec serving middleware +type SpecOption func(*specOptions) + +var defaultSpecOptions = specOptions{ + Path: "", + Document: "swagger.json", +} + +type specOptions struct { + Path string + Document string +} + +func specOptionsWithDefaults(opts []SpecOption) specOptions { + o := defaultSpecOptions + for _, apply := range opts { + apply(&o) + } + + return o +} + +// Spec creates a middleware to serve a swagger spec as a JSON document. +// // This allows for altering the spec before starting the http listener. -// This can be useful if you want to serve the swagger spec from another path than /swagger.json // -func Spec(basePath string, b []byte, next http.Handler) http.Handler { +// The basePath argument indicates the path of the spec document (defaults to "/"). +// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json"). +func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler { if basePath == "" { basePath = "/" } - pth := path.Join(basePath, "swagger.json") + o := specOptionsWithDefaults(opts) + pth := path.Join(basePath, o.Path, o.Document) return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "application/json") + if path.Clean(r.URL.Path) == pth { + rw.Header().Set(contentTypeHeader, applicationJSON) rw.WriteHeader(http.StatusOK) - //#nosec _, _ = rw.Write(b) + return } - if next == nil { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusNotFound) + if next != nil { + next.ServeHTTP(rw, r) + return } - next.ServeHTTP(rw, r) + + rw.Header().Set(contentTypeHeader, applicationJSON) + rw.WriteHeader(http.StatusNotFound) }) } + +// WithSpecPath sets the path to be joined to the base path of the Spec middleware. +// +// This is empty by default. +func WithSpecPath(pth string) SpecOption { + return func(o *specOptions) { + o.Path = pth + } +} + +// WithSpecDocument sets the name of the JSON document served as a spec. 
+//
+// By default, this is "swagger.json"
+func WithSpecDocument(doc string) SpecOption {
+	return func(o *specOptions) {
+		if doc == "" {
+			return
+		}
+
+		o.Document = doc
+	}
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
index b4dea29e4b..ec3c10cbaf 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
@@ -8,40 +8,65 @@ import (
 	"path"
 )
 
-// SwaggerUIOpts configures the Swaggerui middlewares
+// SwaggerUIOpts configures the SwaggerUI middleware
 type SwaggerUIOpts struct {
-	// BasePath for the UI path, defaults to: /
+	// BasePath for the API, defaults to: /
 	BasePath string
-	// Path combines with BasePath for the full UI path, defaults to: docs
+
+	// Path combines with BasePath to construct the path to the UI, defaults to: "docs".
 	Path string
-	// SpecURL the url to find the spec for
+
+	// SpecURL is the URL of the spec document.
+	//
+	// Defaults to: /swagger.json
 	SpecURL string
+
+	// Title for the documentation site, defaults to: API documentation
+	Title string
+
+	// Template specifies a custom template to serve the UI
+	Template string
+
 	// OAuthCallbackURL the url called after OAuth2 login
 	OAuthCallbackURL string
 
 	// The three components needed to embed swagger-ui
-	SwaggerURL       string
+
+	// SwaggerURL points to the js that generates the SwaggerUI site.
+	//
+	// Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js
+	SwaggerURL string
+
 	SwaggerPresetURL string
 	SwaggerStylesURL string
 
 	Favicon32 string
 	Favicon16 string
-
-	// Title for the documentation site, default to: API documentation
-	Title string
 }
 
 // EnsureDefaults in case some options are missing
 func (r *SwaggerUIOpts) EnsureDefaults() {
-	if r.BasePath == "" {
-		r.BasePath = "/"
-	}
-	if r.Path == "" {
-		r.Path = "docs"
+	r.ensureDefaults()
+
+	if r.Template == "" {
+		r.Template = swaggeruiTemplate
 	}
-	if r.SpecURL == "" {
-		r.SpecURL = "/swagger.json"
+}
+
+func (r *SwaggerUIOpts) EnsureDefaultsOauth2() {
+	r.ensureDefaults()
+
+	if r.Template == "" {
+		r.Template = swaggerOAuthTemplate
 	}
+}
+
+func (r *SwaggerUIOpts) ensureDefaults() {
+	common := toCommonUIOptions(r)
+	common.EnsureDefaults()
+	fromCommonToAnyOptions(common, r)
+
+	// swaggerui-specifics
 	if r.OAuthCallbackURL == "" {
 		r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback")
 	}
@@ -60,40 +85,22 @@
 	if r.Favicon32 == "" {
 		r.Favicon32 = swaggerFavicon32Latest
 	}
-	if r.Title == "" {
-		r.Title = "API documentation"
-	}
 }
 
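Putting the spec-serving and UI middlewares together, a hedged sketch (file name, base path and port are hypothetical) that publishes the raw document under a custom name and chains the UI behind it:

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	doc, err := os.ReadFile("spec.json") // hypothetical raw spec document
	if err != nil {
		log.Fatal(err)
	}

	// The UI fetches the document from the URL we serve it at below.
	ui := middleware.SwaggerUI(middleware.SwaggerUIOpts{SpecURL: "/api/spec.json"}, nil)

	// Serve the document at /api/spec.json instead of the default /swagger.json,
	// and fall through to the UI handler for every other path (e.g. /docs).
	handler := middleware.Spec("/api", doc, ui, middleware.WithSpecDocument("spec.json"))
	log.Fatal(http.ListenAndServe(":8080", handler))
}

 // SwaggerUI creates a middleware to serve a documentation site for a swagger spec.
+//
 // This allows for altering the spec before starting the http listener.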
func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { opts.EnsureDefaults() pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, &opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path.Join(r.URL.Path) == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) + tmpl := template.Must(template.New("swaggerui").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) } const ( diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go index 576f6003f7..e81212f71c 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go @@ -4,37 +4,20 @@ import ( "bytes" "fmt" "net/http" - "path" "text/template" ) func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() + opts.EnsureDefaultsOauth2() pth := opts.OAuthCallbackURL - tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate)) + tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, &opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path.Join(r.URL.Path) == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) + return serveUI(pth, assets.Bytes(), next) } const ( diff --git a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go new file mode 100644 index 0000000000..b86efa0089 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go @@ -0,0 +1,173 @@ +package middleware + +import ( + "bytes" + "encoding/gob" + "fmt" + "net/http" + "path" + "strings" +) + +const ( + // constants that are common to all UI-serving middlewares + defaultDocsPath = "docs" + defaultDocsURL = "/swagger.json" + defaultDocsTitle = "API Documentation" +) + +// uiOptions defines common options for UI serving middlewares. +type uiOptions struct { + // BasePath for the UI, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. 
+	//
+	// Defaults to: /swagger.json
+	SpecURL string
+
+	// Title for the documentation site, defaults to: API documentation
+	Title string
+
+	// Template specifies a custom template to serve the UI
+	Template string
+}
+
+// toCommonUIOptions converts any UI option type to retain the common options.
+//
+// This uses gob encoding/decoding to convert common fields from one struct to another.
+func toCommonUIOptions(opts interface{}) uiOptions {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	dec := gob.NewDecoder(&buf)
+	var o uiOptions
+	err := enc.Encode(opts)
+	if err != nil {
+		panic(err)
+	}
+
+	err = dec.Decode(&o)
+	if err != nil {
+		panic(err)
+	}
+
+	return o
+}
+
+func fromCommonToAnyOptions[T any](source uiOptions, target *T) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	dec := gob.NewDecoder(&buf)
+	err := enc.Encode(source)
+	if err != nil {
+		panic(err)
+	}
+
+	err = dec.Decode(target)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// UIOption can be applied to UI serving middleware, such as Context.APIHandler or
+// Context.APIHandlerSwaggerUI, to alter the default behavior.
+type UIOption func(*uiOptions)
+
+func uiOptionsWithDefaults(opts []UIOption) uiOptions {
+	var o uiOptions
+	for _, apply := range opts {
+		apply(&o)
+	}
+
+	return o
+}
+
+// WithUIBasePath sets the base path from where to serve the UI assets.
+//
+// By default, Context middleware sets this value to the API base path.
+func WithUIBasePath(base string) UIOption {
+	return func(o *uiOptions) {
+		if !strings.HasPrefix(base, "/") {
+			base = "/" + base
+		}
+		o.BasePath = base
+	}
+}
+
+// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}).
+func WithUIPath(pth string) UIOption {
+	return func(o *uiOptions) {
+		o.Path = pth
+	}
+}
+
+// WithUISpecURL sets the path from where to serve the swagger spec document.
+//
+// This may be specified as a full URL or a path.
+//
+// By default, this is "/swagger.json"
+func WithUISpecURL(specURL string) UIOption {
+	return func(o *uiOptions) {
+		o.SpecURL = specURL
+	}
+}
+
+// WithUITitle sets the title of the UI.
+//
+// By default, Context middleware sets this value to the title found in the API spec.
+func WithUITitle(title string) UIOption {
+	return func(o *uiOptions) {
+		o.Title = title
+	}
+}
+
+// WithTemplate allows setting a custom template for the UI.
+//
+// UI middleware will panic if the template does not parse or execute properly.
+func WithTemplate(tpl string) UIOption {
+	return func(o *uiOptions) {
+		o.Template = tpl
+	}
+}
+
+// EnsureDefaults in case some options are missing
+func (r *uiOptions) EnsureDefaults() {
+	if r.BasePath == "" {
+		r.BasePath = "/"
+	}
+	if r.Path == "" {
+		r.Path = defaultDocsPath
+	}
+	if r.SpecURL == "" {
+		r.SpecURL = defaultDocsURL
+	}
+	if r.Title == "" {
+		r.Title = defaultDocsTitle
+	}
+}
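The common options above and the per-UI types (RedocOpts, SwaggerUIOpts) are copied into each other through a gob round-trip, which matches exported fields by name and silently drops the rest. A standalone sketch of that trick, using hypothetical stand-in types:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Two hypothetical option types sharing a subset of exported fields,
// mirroring how uiOptions relates to RedocOpts and SwaggerUIOpts.
type redocLike struct{ BasePath, Path, Title, RedocURL string }
type common struct{ BasePath, Path, Title string }

func main() {
	src := redocLike{BasePath: "/", Path: "docs", Title: "API documentation", RedocURL: "https://example.invalid/redoc.js"}

	var buf bytes.Buffer
	var dst common
	// gob matches struct fields by name, so only BasePath, Path and Title
	// survive the round trip; RedocURL has no counterpart and is dropped.
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		panic(err)
	}
	if err := gob.NewDecoder(&buf).Decode(&dst); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {BasePath:/ Path:docs Title:API documentation}
}

+
+// serveUI creates a middleware that serves a templated asset as text/html.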
+func serveUI(pth string, assets []byte, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Clean(r.URL.Path) == pth { + rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + _, _ = rw.Write(assets) + + return + } + + if next != nil { + next.ServeHTTP(rw, r) + + return + } + + rw.Header().Set(contentTypeHeader, "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go index 39a85f7d9e..7b7269bd19 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -197,30 +197,31 @@ func (d *API) Validate() error { // validateWith validates the registrations in this API against the provided spec analyzer func (d *API) validate() error { - var consumes []string + consumes := make([]string, 0, len(d.consumers)) for k := range d.consumers { consumes = append(consumes, k) } - var produces []string + produces := make([]string, 0, len(d.producers)) for k := range d.producers { produces = append(produces, k) } - var authenticators []string + authenticators := make([]string, 0, len(d.authenticators)) for k := range d.authenticators { authenticators = append(authenticators, k) } - var operations []string + operations := make([]string, 0, len(d.operations)) for m, v := range d.operations { for p := range v { operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) } } - var definedAuths []string - for k := range d.spec.Spec().SecurityDefinitions { + secDefinitions := d.spec.Spec().SecurityDefinitions + definedAuths := make([]string, 0, len(secDefinitions)) + for k := range secDefinitions { definedAuths = append(definedAuths, k) } @@ -267,7 +268,7 @@ func (d *API) verify(name string, registrations []string, expectations []string) delete(expected, k) } - var unregistered []string + unregistered := make([]string, 0, len(expected)) for k := range expected { unregistered = append(unregistered, k) } diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go index 1f0135b578..0a5356c607 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/validation.go +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -35,7 +35,6 @@ type validation struct { // ContentType validates the content type of a request func validateContentType(allowed []string, actual string) error { - debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) if len(allowed) == 0 { return nil } @@ -57,13 +56,13 @@ func validateContentType(allowed []string, actual string) error { } func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { - debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) validate := &validation{ context: ctx, request: request, route: route, bound: make(map[string]interface{}), } + validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath()) validate.contentType() if len(validate.result) == 0 { @@ -76,8 +75,12 @@ func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) * return validate } +func (v *validation) debugLogf(format string, args ...any) { + 
v.context.debugLogf(format, args...) +} + func (v *validation) parameters() { - debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { if result.Error() == "validation failure list" { for _, e := range result.(*errors.Validation).Value.([]interface{}) { @@ -91,7 +94,7 @@ func (v *validation) parameters() { func (v *validation) contentType() { if len(v.result) == 0 && runtime.HasBody(v.request) { - debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) ct, _, req, err := v.context.ContentType(v.request) if err != nil { v.result = append(v.result, err) @@ -100,6 +103,7 @@ func (v *validation) contentType() { } if len(v.result) == 0 { + v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", ")) if err := validateContentType(v.route.Consumes, ct); err != nil { v.result = append(v.result, err) } diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go index 078fda1739..9e3e1ecb14 100644 --- a/vendor/github.com/go-openapi/runtime/request.go +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -16,6 +16,8 @@ package runtime import ( "bufio" + "context" + "errors" "io" "net/http" "strings" @@ -96,10 +98,16 @@ func (p *peekingReader) Read(d []byte) (int, error) { if p == nil { return 0, io.EOF } + if p.underlying == nil { + return 0, io.ErrUnexpectedEOF + } return p.underlying.Read(d) } func (p *peekingReader) Close() error { + if p.underlying == nil { + return errors.New("reader already closed") + } p.underlying = nil if p.orig != nil { return p.orig.Close() @@ -107,9 +115,11 @@ func (p *peekingReader) Close() error { return nil } -// JSONRequest creates a new http request with json headers set +// JSONRequest creates a new http request with json headers set. +// +// It uses context.Background. 
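As a small usage sketch (URL and payload hypothetical): the request now carries context.Background, and a caller that needs a deadline can still narrow it after construction:

package main

import (
	"context"
	"net/http"
	"strings"
	"time"

	"github.com/go-openapi/runtime"
)

func main() {
	// Build a request with JSON headers; with this change it is created via
	// http.NewRequestWithContext and context.Background.
	req, err := runtime.JSONRequest(http.MethodPost, "http://localhost:8080/items", strings.NewReader(`{"name":"demo"}`))
	if err != nil {
		panic(err)
	}

	// Derive a bounded context afterwards if one is needed.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req = req.WithContext(ctx)
	_ = req
}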
func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) + req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go index c3ffdac7e8..bb30472bbe 100644 --- a/vendor/github.com/go-openapi/runtime/security/authenticator.go +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -25,12 +25,13 @@ import ( ) const ( - query = "query" - header = "header" + query = "query" + header = "header" + accessTokenParam = "access_token" ) // HttpAuthenticator is a function that authenticates a HTTP request -func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { +func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { //nolint:revive,stylecheck return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { if request, ok := params.(*http.Request); ok { return handler(request) @@ -158,7 +159,7 @@ func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authe inl := strings.ToLower(in) if inl != query && inl != header { // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"")) } var getToken func(*http.Request) string @@ -186,7 +187,7 @@ func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime inl := strings.ToLower(in) if inl != query && inl != header { // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"")) } var getToken func(*http.Request) string @@ -226,12 +227,12 @@ func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Aut } if token == "" { qs := r.Request.URL.Query() - token = qs.Get("access_token") + token = qs.Get(accessTokenParam) } //#nosec ct, _, _ := runtime.ContentType(r.Request.Header) if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") + token = r.Request.FormValue(accessTokenParam) } if token == "" { @@ -256,12 +257,12 @@ func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runti } if token == "" { qs := r.Request.URL.Query() - token = qs.Get("access_token") + token = qs.Get(accessTokenParam) } //#nosec ct, _, _ := runtime.ContentType(r.Request.Header) if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") + token = r.Request.FormValue(accessTokenParam) } if token == "" { diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go index b30d377126..a1a0a589df 100644 --- a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go +++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go @@ -18,8 +18,7 @@ import ( "io" "github.com/go-openapi/runtime" - - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // YAMLConsumer creates a consumer for yaml data diff --git a/vendor/github.com/go-openapi/spec/.gitignore 
b/vendor/github.com/go-openapi/spec/.gitignore index dd91ed6a04..f47cb2045f 100644 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -1,2 +1 @@ -secrets.yml -coverage.out +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index 835d55e742..22f8d21cca 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -11,7 +11,7 @@ linters-settings: threshold: 200 goconst: min-len: 2 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -40,3 +40,22 @@ linters: - tparallel - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 18782c6daf..7fd2810c69 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -1,8 +1,5 @@ -# OAI object model +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) -[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) - -[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) @@ -32,3 +29,26 @@ The object model for OpenAPI specification documents. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. > > An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important: since `id` in jsonschema influences +> how `$ref` are resolved. +> This `id` does not conflict with any property named `id`. 
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml deleted file mode 100644 index 0903593916..0000000000 --- a/vendor/github.com/go-openapi/spec/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\spec -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index afc83850c2..0000000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-bindata. DO NOT EDIT. 
-// sources:
-// schemas/jsonschema-draft-04.json (4.357kB)
-// schemas/v2/schema.json (40.248kB)
-
-package spec
-
-import (
-	"bytes"
-	"compress/gzip"
-	"crypto/sha256"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"time"
-)
-
-func bindataRead(data []byte, name string) ([]byte, error) {
-	gz, err := gzip.NewReader(bytes.NewBuffer(data))
-	if err != nil {
-		return nil, fmt.Errorf("read %q: %v", name, err)
-	}
-
-	var buf bytes.Buffer
-	_, err = io.Copy(&buf, gz)
-	clErr := gz.Close()
-
-	if err != nil {
-		return nil, fmt.Errorf("read %q: %v", name, err)
-	}
-	if clErr != nil {
-		return nil, err
-	}
-
-	return buf.Bytes(), nil
-}
-
-type asset struct {
-	bytes  []byte
-	info   os.FileInfo
-	digest [sha256.Size]byte
-}
-
-type bindataFileInfo struct {
-	name    string
-	size    int64
-	mode    os.FileMode
-	modTime time.Time
-}
-
-func (fi bindataFileInfo) Name() string {
-	return fi.name
-}
-func (fi bindataFileInfo) Size() int64 {
-	return fi.size
-}
-func (fi bindataFileInfo) Mode() os.FileMode {
-	return fi.mode
-}
-func (fi bindataFileInfo) ModTime() time.Time {
-	return fi.modTime
-}
-func (fi bindataFileInfo) IsDir() bool {
-	return false
-}
-func (fi bindataFileInfo) Sys() interface{} {
-	return nil
-}
-
-var _jsonschemaDraft04Json = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff… [4.357 kB of gzip-compressed jsonschema-draft-04.json bytes elided]")
-
-func jsonschemaDraft04JsonBytes() ([]byte, error) {
-	return bindataRead(
-		_jsonschemaDraft04Json,
-		"jsonschema-draft-04.json",
-	)
-}
-
-func jsonschemaDraft04Json() (*asset, error) {
-	bytes, err := jsonschemaDraft04JsonBytes()
-	if err != nil {
-		return nil, err
-	}
-
-	info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}}
-	return a, nil
-}
-
-var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff… [40.248 kB of gzip-compressed v2/schema.json bytes elided]")
-
-func v2SchemaJsonBytes() ([]byte, error) {
-	return bindataRead(
-		_v2SchemaJson,
-		"v2/schema.json",
-	)
-}
-
-func v2SchemaJson() (*asset, error) {
-	bytes, err := v2SchemaJsonBytes()
-	if err != nil {
-		return nil, err
-	}
-
-	info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}}
-	return a, nil
-}
-
-// Asset loads and returns the asset for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func Asset(name string) ([]byte, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
-		}
-		return a.bytes, nil
-	}
-	return nil, fmt.Errorf("Asset %s not found", name)
-}
-
-// AssetString returns the asset contents as a string (instead of a []byte).
-func AssetString(name string) (string, error) {
-	data, err := Asset(name)
-	return string(data), err
-}
-
-// MustAsset is like Asset but panics when Asset would return an error.
-// It simplifies safe initialization of global variables.
-func MustAsset(name string) []byte {
-	a, err := Asset(name)
-	if err != nil {
-		panic("asset: Asset(" + name + "): " + err.Error())
-	}
-
-	return a
-}
-
-// MustAssetString is like AssetString but panics when Asset would return an
-// error. It simplifies safe initialization of global variables.
-func MustAssetString(name string) string {
-	return string(MustAsset(name))
-}
-
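The generated accessors being removed here are superseded by Go's embed package; the new embed.go added further down in this patch reads both schemas straight from an embed.FS. A minimal standalone sketch of the pattern (the schemas/example.json path is hypothetical and must exist at build time):

package main

import (
	"embed"
	"fmt"
)

// go:embed captures matching files at compile time, replacing go-bindata.
//
//go:embed schemas/*.json
var schemas embed.FS

func main() {
	b, err := schemas.ReadFile("schemas/example.json")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes embedded\n", len(b))
}

-// AssetInfo loads and returns the asset info for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.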
-func AssetInfo(name string) (os.FileInfo, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetDigest returns the digest of the file with the given name. It returns an -// error if the asset could not be found or the digest could not be loaded. -func AssetDigest(name string) ([sha256.Size]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) - } - return a.digest, nil - } - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) -} - -// Digests returns a map of all known files and their checksums. -func Digests() (map[string][sha256.Size]byte, error) { - mp := make(map[string][sha256.Size]byte, len(_bindata)) - for name := range _bindata { - a, err := _bindata[name]() - if err != nil { - return nil, err - } - mp[name] = a.digest - } - return mp, nil -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04Json, - - "v2/schema.json": v2SchemaJson, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"}, -// AssetDir("data/img") would return []string{"a.png", "b.png"}, -// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - canonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(canonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, - "v2": {nil, map[string]*bintree{ - "schema.json": {v2SchemaJson, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory. 
-func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) -} - -// RestoreAssets restores an asset under the given directory recursively. -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - canonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) -} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 0000000000..1f4284750a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,17 @@ +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index d4ea889d44..b81a5699a0 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -27,7 +27,6 @@ import ( // all relative $ref's will be resolved from there. // // PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. -// type ExpandOptions struct { RelativeBase string // the path to the root document to expand. This is a file, not a directory SkipSchemas bool // do not expand schemas, just paths, parameters and responses @@ -58,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { if !options.SkipSchemas { for key, definition := range spec.Definitions { parentRefs := make([]string, 0, 10) - parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) + parentRefs = append(parentRefs, "#/definitions/"+key) def, err := expandSchema(definition, parentRefs, resolver, specBasePath) if resolver.shouldStopOnError(err) { @@ -103,15 +102,21 @@ const rootBase = ".root" // baseForRoot loads in the cache the root document and produces a fake ".root" base path entry // for further $ref resolution -// -// Setting the cache is optional and this parameter may safely be left to nil. 
func baseForRoot(root interface{}, cache ResolutionCache) string { + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) + if root == nil { - return "" + // ensure that we never leave a nil root: always cache the root base pseudo-document + cachedRoot, found := cache.Get(normalizedBase) + if found && cachedRoot != nil { + // the cache is already preloaded with a root + return normalizedBase + } + + root = map[string]interface{}{} } - // cache the root document to resolve $ref's - normalizedBase := normalizeBase(rootBase) cache.Set(normalizedBase, root) return normalizedBase @@ -208,7 +213,19 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.Ref.String() != "" { - return expandSchemaRef(target, parentRefs, resolver, basePath) + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. + rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil } for k := range target.Definitions { @@ -520,21 +537,25 @@ func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { } func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, _, err := getRefAndSchema(input) + ref, sch, err := getRefAndSchema(input) if err != nil { return err } - if ref == nil { + if ref == nil && sch == nil { // nothing to do return nil } parentRefs := make([]string, 0, 10) - if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) } - ref, sch, _ := getRefAndSchema(input) if ref.String() != "" { transitiveResolver := resolver.transitiveResolver(basePath, *ref) basePath = resolver.updateBasePath(transitiveResolver, basePath) @@ -546,6 +567,7 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa if ref != nil { *ref = Ref{} } + return nil } @@ -555,38 +577,29 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa return ern } - switch { - case resolver.isCircular(&rebasedRef, basePath, parentRefs...): + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ // this is a circular $ref: stop expansion if !resolver.options.AbsoluteCircularRef { sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) } else { sch.Ref = rebasedRef } - case !resolver.options.SkipSchemas: - // schema expanded to a $ref in another root - sch.Ref = rebasedRef - debugLog("rebased to: %s", sch.Ref.String()) - default: - // skip schema expansion but rebase $ref to schema - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) } } + // $ref expansion or rebasing is performed by expandSchema below if ref != nil { *ref = Ref{} } // expand schema - if !resolver.options.SkipSchemas { - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - if s == nil { - // guard for when continuing on error - return nil - } + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error *sch = *s } diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go index 2df0723154..f19f1a8fb6 100644 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) { return u, "" } -func fixWindowsURI(u *url.URL, in string) { +func fixWindowsURI(_ *url.URL, _ string) { } diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 995ce6acb1..a69cca8814 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -217,9 +217,12 @@ func (o *Operation) AddParam(param *Parameter) *Operation { for i, p := range o.Parameters { if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) params = append(params, o.Parameters[i+1:]...) o.Parameters = params + return o } } diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 2b2b89b67b..bd4f1cdb07 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -84,27 +84,27 @@ type ParamProps struct { // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. 
Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. +// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
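The five locations described above map directly onto this package's fluent constructors. A short sketch, assuming the exported builder helpers (`PathParam`, `QueryParam`, `HeaderParam`, `BodyParam`, `FileParam`, `Typed`); the sample parameter names are invented:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// One example per parameter location; names are made up for illustration.
	params := []*spec.Parameter{
		spec.PathParam("itemId").Typed("string", ""),         // part of the URL: /items/{itemId}
		spec.QueryParam("id").Typed("string", ""),            // appended to the URL: /items?id=###
		spec.HeaderParam("X-Request-Id").Typed("string", ""), // custom request header
		spec.BodyParam("payload", spec.StringProperty()),     // at most one body parameter per operation
		spec.FileParam("upload"),                             // form data; the only way to send files
	}
	for _, p := range params {
		fmt.Println(p.In, p.Name)
	}
}
```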
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index b81175afdf..0059b99aed 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -168,14 +168,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) - unescaped, err := url.PathUnescape(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - u := url.URL{Path: unescaped} - - data, fromCache := r.cache.Get(u.RequestURI()) + data, fromCache := r.cache.Get(normalized) if fromCache { return data, toFetch, fromCache, nil } diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 0000000000..bcbb84743e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", 
+ "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 0000000000..ebe10ed32d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." 
+ }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 7d38b6e625..876aa12759 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -26,7 +26,7 @@ import ( const ( // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs 
SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema + // JSONSchemaURL the url for the json schema JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) @@ -41,7 +41,7 @@ func MustLoadJSONSchemaDraft04() *Schema { // JSONSchemaDraft04 loads the json schema document for json shema draft04 func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") + b, err := jsonschemaDraft04JSONBytes() if err != nil { return nil, err } @@ -65,7 +65,7 @@ func MustLoadSwagger20Schema() *Schema { // Swagger20Schema loads the swagger 2.0 schema from the embedded assets func Swagger20Schema() (*Schema, error) { - b, err := Asset("v2/schema.json") + b, err := v2SchemaJSONBytes() if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 44722ffd5a..1590fd1751 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool - if len(data) >= 4 { + if len(data) > 0 { if data[0] == '{' { var sch Schema if err := json.Unmarshal(data, &sch); err != nil { @@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } nw.Schema = &sch } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Allows = !bytes.Equal(data, []byte("false")) } *s = nw return nil diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go deleted file mode 100644 index 60b7851536..0000000000 --- a/vendor/github.com/go-openapi/spec/url_go18.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !go1.19 -// +build !go1.19 - -package spec - -import "net/url" - -var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go index 392e3e6395..5bdfe40bcc 100644 --- a/vendor/github.com/go-openapi/spec/url_go19.go +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -1,6 +1,3 @@ -//go:build go1.19 -// +build go1.19 - package spec import "net/url" diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml index 81818ca678..22f8d21cca 100644 --- a/vendor/github.com/go-openapi/validate/.golangci.yml +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -1,12 +1,14 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 50 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 min-occurrences: 3 @@ -15,36 +17,45 @@ linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits + - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - tparallel + - gofumpt - paralleltest - - cyclop # because we have gocyclo already - # TODO: review the linters below. We disabled them to make the CI pass first. 
- - ireturn + - tparallel + - thelper + - ifshort + - exhaustruct - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn - forcetypeassert - - thelper - # Disable deprecated linters. - # They will be removed from golangci-lint in future. + - cyclop + # deprecated linters + - deadcode - interfacer - - golint \ No newline at end of file + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md new file mode 100644 index 0000000000..79cf6a077b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -0,0 +1,31 @@ +# Benchmark + +Validating the Kubernetes Swagger API + +## v0.22.6: 60,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op +``` + +## After refact PR: minor but noticable improvements: 25,000,000 allocs +``` +go test -bench Spec +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op +``` + +## After reduce GC pressure PR: 17,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op +``` diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md index ea2d68cb68..e8e1bb218d 100644 --- a/vendor/github.com/go-openapi/validate/README.md +++ b/vendor/github.com/go-openapi/validate/README.md @@ -1,7 +1,5 @@ -# Validation helpers -[![Build Status](https://travis-ci.org/go-openapi/validate.svg?branch=master)](https://travis-ci.org/go-openapi/validate) -[![Build status](https://ci.appveyor.com/api/projects/status/d6epy6vipueyh5fs/branch/master?svg=true)](https://ci.appveyor.com/project/fredbi/validate/branch/master) -[![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) +# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) + [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate) @@ -24,7 +22,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m * Minimum, Maximum, MultipleOf * FormatOf -[Documentation](https://godoc.org/github.com/go-openapi/validate) +[Documentation](https://pkg.go.dev/github.com/go-openapi/validate) ## FAQ diff --git a/vendor/github.com/go-openapi/validate/appveyor.yml b/vendor/github.com/go-openapi/validate/appveyor.yml deleted file mode 100644 index 89e5bccb3a..0000000000 --- a/vendor/github.com/go-openapi/validate/appveyor.yml +++ /dev/null @@ 
-1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\validate -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m -args -enable-long ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index bd14c2a269..e0dd93839e 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -25,48 +25,55 @@ import ( // According to Swagger spec, default values MUST validate their schema. type defaultValidator struct { SpecValidator *SpecValidator - visitedSchemas map[string]bool + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions } // resetVisited resets the internal state of visited schemas func (d *defaultValidator) resetVisited() { - d.visitedSchemas = map[string]bool{} + if d.visitedSchemas == nil { + d.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range d.visitedSchemas { + delete(d.visitedSchemas, k) + } } -func isVisited(path string, visitedSchemas map[string]bool) bool { - found := visitedSchemas[path] - if !found { - // search for overlapping paths - frags := strings.Split(path, ".") - if len(frags) < 2 { - // shortcut exit on smaller paths - return found +func isVisited(path string, visitedSchemas map[string]struct{}) bool { + _, found := visitedSchemas[path] + if found { + return true + } + + // search for overlapping paths + var ( + parent string + suffix string + ) + for i := len(path) - 2; i >= 0; i-- { + r := path[i] + if r != '.' 
{ + continue } - last := len(frags) - 1 - var currentFragStr, parent string - for i := range frags { - if i == 0 { - currentFragStr = frags[last] - } else { - currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".") - } - if i < last { - parent = strings.Join(frags[0:last-i], ".") - } else { - parent = "" - } - if strings.HasSuffix(parent, currentFragStr) { - found = true - break - } + + parent = path[0:i] + suffix = path[i+1:] + + if strings.HasSuffix(parent, suffix) { + return true } } - return found + + return false } // beingVisited asserts a schema is being visited func (d *defaultValidator) beingVisited(path string) { - d.visitedSchemas[path] = true + d.visitedSchemas[path] = struct{}{} } // isVisited tells if a path has already been visited @@ -75,8 +82,9 @@ func (d *defaultValidator) isVisited(path string) bool { } // Validate validates the default values declared in the swagger spec -func (d *defaultValidator) Validate() (errs *Result) { - errs = new(Result) +func (d *defaultValidator) Validate() *Result { + errs := pools.poolOfResults.BorrowResult() // will redeem when merged + if d == nil || d.SpecValidator == nil { return errs } @@ -89,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // every default value that is specified must validate against the schema for that property // headers, items, parameters, schema - res := new(Result) + res := pools.poolOfResults.BorrowResult() // will redeem when merged s := d.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -107,10 +115,12 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // default values provided must validate against their inline definition (no explicit schema) if param.Default != nil && param.Schema == nil { // check param default value is valid - red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Default) //#nosec + red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -120,6 +130,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -129,6 +141,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } } @@ -154,7 +168,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // reset explored schemas to get depth-first recursive-proof exploration d.resetVisited() for nm, sch := range s.spec.Spec().Definitions { - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec + res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec } } return res @@ -170,17 +184,18 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl if response.Headers != nil { // Safeguard for 
nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration d.resetVisited() if h.Default != nil { - red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) //#nosec + red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -190,6 +205,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -209,6 +226,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon // Additional message to make sure the context of the error is not lost res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } return res @@ -220,11 +239,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri return nil } d.beingVisited(path) - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := d.SpecValidator if schema.Default != nil { - res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default)) + res.Merge( + newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default), + ) } if schema.Items != nil { if schema.Items.Schema != nil { @@ -242,7 +263,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well) - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) } for propName, prop := range schema.Properties { res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec @@ -251,7 +272,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec } if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) } if schema.AllOf != nil { for i, aoSch := range schema.AllOf { @@ -262,13 +283,15 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } // TODO: Temporary duplicated code. 
Need to refactor with examples -// nolint: dupl + func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := d.SpecValidator if items != nil { if items.Default != nil { - res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default)) + res.Merge( + newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default), + ) } if items.Items != nil { res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items)) diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go index f5ca9a5d58..d2b901eab9 100644 --- a/vendor/github.com/go-openapi/validate/doc.go +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -19,7 +19,7 @@ as well as tools to validate data against their schema. This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. -Validating a specification +# Validating a specification Validates a spec document (from JSON or YAML) against the JSON schema for swagger, then checks a number of extra rules that can't be expressed in JSON schema. @@ -30,34 +30,36 @@ Entry points: - SpecValidator.Validate() Reported as errors: - [x] definition can't declare a property that's already defined by one of its ancestors - [x] definition's ancestor can't be a descendant of the same model - [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method - [x] each security reference should contain only unique scopes - [x] each security scope in a security definition should be unique - [x] parameters in path must be unique - [x] each path parameter must correspond to a parameter placeholder and vice versa - [x] each referenceable definition must have references - [x] each definition property listed in the required array must be defined in the properties of the model - [x] each parameter should have a unique `name` and `type` combination - [x] each operation should have only 1 parameter of type body - [x] each reference must point to a valid object - [x] every default value that is specified must validate against the schema for that property - [x] items property is required for all schemas/definitions of type `array` - [x] path parameters must be declared a required - [x] headers must not contain $ref - [x] schema and property examples provided must validate against their respective object's schema - [x] examples provided must validate their schema + + [x] definition can't declare a property that's already defined by one of its ancestors + [x] definition's ancestor can't be a descendant of the same model + [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be relaxed by disabling StrictPathParamUniqueness.
+ [x] each security reference should contain only unique scopes + [x] each security scope in a security definition should be unique + [x] parameters in path must be unique + [x] each path parameter must correspond to a parameter placeholder and vice versa + [x] each referenceable definition must have references + [x] each definition property listed in the required array must be defined in the properties of the model + [x] each parameter should have a unique `name` and `type` combination + [x] each operation should have only 1 parameter of type body + [x] each reference must point to a valid object + [x] every default value that is specified must validate against the schema for that property + [x] items property is required for all schemas/definitions of type `array` + [x] path parameters must be declared as required + [x] headers must not contain $ref + [x] schema and property examples provided must validate against their respective object's schema + [x] examples provided must validate their schema Reported as warnings: - [x] path parameters should not contain any of [{,},\w] - [x] empty path - [x] unused definitions - [x] unsupported validation of examples on non-JSON media types - [x] examples in response without schema - [x] readOnly properties should not be required -Validating a schema + [x] path parameters should not contain any of [{,},\w] + [x] empty path + [x] unused definitions + [x] unsupported validation of examples on non-JSON media types + [x] examples in response without schema + [x] readOnly properties should not be required + +# Validating a schema The schema validation toolkit validates data against JSON-schema-draft 04 schema. @@ -70,16 +72,16 @@ Entry points: - AgainstSchema() - ... -Known limitations +# Known limitations With the current version of this package, the following aspects of swagger are not yet supported: - [ ] errors and warnings are not reported with key/line number in spec - [ ] default values and examples on responses only support application/json producer type - [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values - [ ] rules for collectionFormat are not implemented - [ ] no validation rule for polymorphism support (discriminator) [not done here] - [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid - [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 + [ ] errors and warnings are not reported with key/line number in spec + [ ] default values and examples on responses only support application/json producer type + [ ] invalid numeric constraints (such as Minimum, etc.)
are not checked except for default and example values + [ ] rules for collectionFormat are not implemented + [ ] no validation rule for polymorphism support (discriminator) [not done here] + [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid + [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 */ package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index c8bffd78e5..d08956973c 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -23,17 +23,27 @@ import ( // ExampleValidator validates example values defined in a spec type exampleValidator struct { SpecValidator *SpecValidator - visitedSchemas map[string]bool + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions } // resetVisited resets the internal state of visited schemas func (ex *exampleValidator) resetVisited() { - ex.visitedSchemas = map[string]bool{} + if ex.visitedSchemas == nil { + ex.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range ex.visitedSchemas { + delete(ex.visitedSchemas, k) + } } // beingVisited asserts a schema is being visited func (ex *exampleValidator) beingVisited(path string) { - ex.visitedSchemas[path] = true + ex.visitedSchemas[path] = struct{}{} } // isVisited tells if a path has already been visited @@ -48,9 +58,9 @@ func (ex *exampleValidator) isVisited(path string) bool { // - schemas // - individual property // - responses -// -func (ex *exampleValidator) Validate() (errs *Result) { - errs = new(Result) +func (ex *exampleValidator) Validate() *Result { + errs := pools.poolOfResults.BorrowResult() + if ex == nil || ex.SpecValidator == nil { return errs } @@ -65,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // in: schemas, properties, object, items // not in: headers, parameters without schema - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := ex.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -83,10 +93,12 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // default values provided must validate against their inline definition (no explicit schema) if param.Example != nil && param.Schema == nil { // check param default value is valid - red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Example) //#nosec + red := newParamValidator(¶m, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -96,6 +108,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -105,6 +119,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } } @@ -130,7 +146,7 @@ func (ex *exampleValidator) 
validateExampleValueValidAgainstSchema() *Result { // reset explored schemas to get depth-first recursive-proof exploration ex.resetVisited() for nm, sch := range s.spec.Spec().Definitions { - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec + res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec } } return res @@ -146,17 +162,18 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl if response.Headers != nil { // Safeguard for nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration ex.resetVisited() if h.Example != nil { - red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) //#nosec + red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -166,6 +183,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -185,13 +204,17 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo // Additional message to make sure the context of the error is not lost res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } if response.Examples != nil { if response.Schema != nil { if example, ok := response.Examples["application/json"]; ok { - res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example)) + res.MergeAsWarnings( + newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), + ) } else { // TODO: validate other media types too res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) @@ -210,10 +233,12 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } ex.beingVisited(path) s := ex.SpecValidator - res := new(Result) + res := pools.poolOfResults.BorrowResult() if schema.Example != nil { - res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Example)) + res.MergeAsWarnings( + newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example), + ) } if schema.Items != nil { if schema.Items.Schema != nil { @@ -231,7 +256,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well) - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, 
schema.AdditionalItems.Schema)) } for propName, prop := range schema.Properties { res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec @@ -240,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec } if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) } if schema.AllOf != nil { for i, aoSch := range schema.AllOf { @@ -251,13 +276,16 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } // TODO: Temporary duplicated code. Need to refactor with examples -// nolint: dupl +// + func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := ex.SpecValidator if items != nil { if items.Example != nil { - res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example)) + res.MergeAsWarnings( + newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example), + ) } if items.Items != nil { res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items)) @@ -266,5 +294,6 @@ func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in stri res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) } } + return res } diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go index 0ad996cbbc..f4e3552130 100644 --- a/vendor/github.com/go-openapi/validate/formats.go +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -22,10 +22,32 @@ import ( ) type formatValidator struct { - Format string Path string In string + Format string KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var f *formatValidator + if opts.recycleValidators { + f = pools.poolOfFormatValidators.BorrowValidator() + } else { + f = new(formatValidator) + } + + f.Path = path + f.In = in + f.Format = format + f.KnownFormats = formats + f.Options = opts + + return f } func (f *formatValidator) SetPath(path string) { @@ -33,37 +55,45 @@ func (f *formatValidator) SetPath(path string) { } func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool { - doit := func() bool { - if source == nil { - return false - } - switch source := source.(type) { - case *spec.Items: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Parameter: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Schema: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Header: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - } + if source == nil || f.KnownFormats == nil { + return false + } + + switch source := source.(type) { + case *spec.Items: + return kind == reflect.String && 
f.KnownFormats.ContainsName(source.Format) + case *spec.Parameter: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Schema: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Header: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + default: return false } - r := doit() - debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind) - return r } func (f *formatValidator) Validate(val interface{}) *Result { - result := new(Result) - debugLog("validating \"%v\" against format: %s", val, f.Format) + if f.Options.recycleValidators { + defer func() { + f.redeem() + }() + } + + var result *Result + if f.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { result.AddErrors(err) } - if result.HasErrors() { - return result - } - return nil + return result +} + +func (f *formatValidator) redeem() { + pools.poolOfFormatValidators.RedeemValidator(f) } diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 48ebfab58e..757e403d91 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -101,9 +101,17 @@ type errorHelper struct { // A collection of unexported helpers for error construction } -func (h *errorHelper) sErr(err errors.Error) *Result { +func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result { // Builds a Result from standard errors.Error - return &Result{Errors: []error{err}} + var result *Result + if recycle { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + result.Errors = []error{err} + + return result } func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result { @@ -157,7 +165,7 @@ func (h *valueHelper) asInt64(val interface{}) int64 { // Number conversion function for int64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -174,7 +182,7 @@ func (h *valueHelper) asUint64(val interface{}) uint64 { // Number conversion function for uint64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return uint64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -192,7 +200,7 @@ func (h *valueHelper) asFloat64(val interface{}) float64 { // Number conversion function for float64, without error checking // (implements an implicit type upgrade). 
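For context, the newFormatValidator constructor earlier in this hunk follows the same shape as every other newXxxValidator constructor in this patch: borrow from a pool when recycling is enabled, otherwise allocate, then reassign every field so a recycled instance carries no stale state. A minimal, self-contained sketch of that pattern (all names hypothetical):

	package main

	import (
		"fmt"
		"sync"
	)

	type options struct{ recycle bool }

	type validator struct{ path string }

	var pool = sync.Pool{New: func() any { return new(validator) }}

	// newValidator takes the pool path only when recycling is enabled,
	// and unconditionally reinitializes all fields.
	func newValidator(path string, opts *options) *validator {
		if opts == nil {
			opts = new(options)
		}

		var v *validator
		if opts.recycle {
			v = pool.Get().(*validator)
		} else {
			v = new(validator)
		}

		v.path = path

		return v
	}

	func main() {
		v := newValidator("definitions.Pet", &options{recycle: true})
		fmt.Println(v.path)
		pool.Put(v) // redeem once the validation pass is done
	}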
v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -225,7 +233,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re operation.Parameters = resolvedParams for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, - func(p spec.Parameter, err error) bool { + func(_ spec.Parameter, err error) bool { // since params have already been expanded, there are few causes for error res.AddErrors(someParametersBrokenMsg(path, method, operationID)) // original error from analyzer @@ -250,7 +258,7 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec } if err != nil { // Safeguard - // NOTE: we may enter enter here when the whole parameter is an unresolved $ref + // NOTE: we may enter here when the whole parameter is an unresolved $ref refPath := strings.Join([]string{"\"" + path + "\"", method}, ".") errorHelp.addPointerError(res, err, param.Ref.String(), refPath) return nil, res @@ -306,6 +314,7 @@ func (r *responseHelper) expandResponseRef( errorHelp.addPointerError(res, err, response.Ref.String(), path) return nil, res } + return response, res } diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go index 7bb12615d8..dff73fa98a 100644 --- a/vendor/github.com/go-openapi/validate/object_validator.go +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -15,8 +15,8 @@ package validate import ( + "fmt" "reflect" - "regexp" "strings" "github.com/go-openapi/errors" @@ -35,62 +35,116 @@ type objectValidator struct { PatternProperties map[string]spec.Schema Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions + splitPath []string +} + +func newObjectValidator(path, in string, + maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, + additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, + root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *objectValidator + if opts.recycleValidators { + v = pools.poolOfObjectValidators.BorrowValidator() + } else { + v = new(objectValidator) + } + + v.Path = path + v.In = in + v.MaxProperties = maxProperties + v.MinProperties = minProperties + v.Required = required + v.Properties = properties + v.AdditionalProperties = additionalProperties + v.PatternProperties = patternProperties + v.Root = root + v.KnownFormats = formats + v.Options = opts + v.splitPath = strings.Split(v.Path, ".") + + return v } func (o *objectValidator) SetPath(path string) { o.Path = path + o.splitPath = strings.Split(path, ".") } func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool { // TODO: this should also work for structs // there is a problem in the type validator where it will be unhappy about null values // so that requires more testing - r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct) - debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind) - return r + _, isSchema := source.(*spec.Schema) + return isSchema && (kind == reflect.Map || kind == reflect.Struct) } func (o 
*objectValidator) isProperties() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties } func (o *objectValidator) isDefault() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault } func (o *objectValidator) isExample() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample } func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) { // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly. // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type). - if t, typeFound := val[jsonType]; typeFound { - if tpe, ok := t.(string); ok && tpe == arrayType { - if item, itemsKeyFound := val[jsonItems]; !itemsKeyFound { - res.AddErrors(errors.Required(jsonItems, o.Path, item)) - } - } + if val == nil { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + return + } + + tpe, isString := t.(string) + if !isString || tpe != arrayType { + return + } + + item, itemsKeyFound := val[jsonItems] + if itemsKeyFound { + return } + + res.AddErrors(errors.Required(jsonItems, o.Path, item)) } func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) { - if !o.isProperties() && !o.isDefault() && !o.isExample() { - if _, itemsKeyFound := val[jsonItems]; itemsKeyFound { - t, typeFound := val[jsonType] - if typeFound { - if tpe, ok := t.(string); !ok || tpe != arrayType { - res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) - } - } else { - // there is no type - res.AddErrors(errors.Required(jsonType, o.Path, t)) - } - } + if val == nil { + return + } + + if o.isProperties() || o.isDefault() || o.isExample() { + return + } + + _, itemsKeyFound := val[jsonItems] + if !itemsKeyFound { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + // there is no type + res.AddErrors(errors.Required(jsonType, o.Path, t)) + } + + if tpe, isString := t.(string); !isString || tpe != arrayType { + res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) } } @@ -104,176 +158,274 @@ func (o *objectValidator) precheck(res *Result, val map[string]interface{}) { } func (o *objectValidator) Validate(data interface{}) *Result { - val := data.(map[string]interface{}) - // TODO: guard against nil data + if o.Options.recycleValidators { + defer func() { + o.redeem() + }() + } + + var val map[string]interface{} + if data != nil { + var ok bool + val, ok = data.(map[string]interface{}) + if !ok { + return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult) + } + } numKeys := int64(len(val)) if o.MinProperties != nil && numKeys < *o.MinProperties { - return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties)) + return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult) } if o.MaxProperties != nil && numKeys > *o.MaxProperties { - return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties)) + return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult) } - res := new(Result) + var res *Result + if o.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } o.precheck(res, val) // 
check validity of field names if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows { // Case: additionalProperties: false - for k := range val { - _, regularProperty := o.Properties[k] - matched := false - - for pk := range o.PatternProperties { - if matches, _ := regexp.MatchString(pk, k); matches { - matched = true - break - } + o.validateNoAdditionalProperties(val, res) + } else { + // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } + o.validateAdditionalProperties(val, res) + } + + o.validatePropertiesSchema(val, res) + + // Check patternProperties + // TODO: it looks like we have done that twice in many cases + for key, value := range val { + _, regularProperty := o.Properties[key] + matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well + if regularProperty || !matched { + continue + } + + for _, pName := range patterns { + if v, ok := o.PatternProperties[pName]; ok { + r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(data.(map[string]interface{}), key, r) } + } + } - if !regularProperty && k != "$schema" && k != "id" && !matched { - // Special properties "$schema" and "id" are ignored - res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) - - // BUG(fredbi): This section should move to a part dedicated to spec validation as - // it will conflict with regular schemas where a property "headers" is defined. - - // - // Croaks a more explicit message on top of the standard one - // on some recognized cases. - // - // NOTE: edge cases with invalid type assertion are simply ignored here. - // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered - // by higher level callers (the IMPORTANT! tag will be eventually - // removed). - if k == "headers" && val[k] != nil { - // $ref is forbidden in header - if headers, mapOk := val[k].(map[string]interface{}); mapOk { - for headerKey, headerBody := range headers { - if headerBody != nil { - if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk { - if _, found := headerSchema["$ref"]; found { - var msg string - if refString, stringOk := headerSchema["$ref"].(string); stringOk { - msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") - } - res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) - } - } - } - } - } - /* - case "$ref": - if val[k] != nil { - // TODO: check context of that ref: warn about siblings, check against invalid context - } - */ - } + return res +} + +func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) { + for k := range val { + if k == "$schema" || k == "id" { + // special properties "$schema" and "id" are ignored + continue + } + + _, regularProperty := o.Properties[k] + if regularProperty { + continue + } + + matched := false + for pk := range o.PatternProperties { + re, err := compileRegexp(pk) + if err != nil { + continue + } + if matches := re.MatchString(k); matches { + matched = true + break } } - } else { - // Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } - for key, value := range val { - _, regularProperty := o.Properties[key] - - // Validates property against "patternProperties" if applicable - // BUG(fredbi): succeededOnce is always false - - // NOTE: how about regular properties which do not match patternProperties? 
- matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) - - if !(regularProperty || matched || succeededOnce) { - - // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator - if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil { - // AdditionalProperties as Schema - r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) - res.mergeForField(data.(map[string]interface{}), key, r) - } else if regularProperty && !(matched || succeededOnce) { - // TODO: this is dead code since regularProperty=false here - res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key)) - } + if matched { + continue + } + + res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) + + // BUG(fredbi): This section should move to a part dedicated to spec validation as + // it will conflict with regular schemas where a property "headers" is defined. + + // + // Croaks a more explicit message on top of the standard one + // on some recognized cases. + // + // NOTE: edge cases with invalid type assertion are simply ignored here. + // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered + // by higher level callers (the IMPORTANT! tag will be eventually + // removed). + if k != "headers" || val[k] == nil { + continue + } + + // $ref is forbidden in header + headers, mapOk := val[k].(map[string]interface{}) + if !mapOk { + continue + } + + for headerKey, headerBody := range headers { + if headerBody == nil { + continue + } + + headerSchema, mapOfMapOk := headerBody.(map[string]interface{}) + if !mapOfMapOk { + continue + } + + _, found := headerSchema["$ref"] + if !found { + continue + } + + refString, stringOk := headerSchema["$ref"].(string) + if !stringOk { + continue } + + msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") + res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) + /* + case "$ref": + if val[k] != nil { + // TODO: check context of that ref: warn about siblings, check against invalid context + } + */ + } + } +} + +func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) { + for key, value := range val { + _, regularProperty := o.Properties[key] + if regularProperty { + continue + } + + // Validates property against "patternProperties" if applicable + // BUG(fredbi): succeededOnce is always false + + // NOTE: how about regular properties which do not match patternProperties? 
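A minimal sketch of the pattern-matching dispatch performed by the validatePatternProperty calls around here, with plain regexp.Compile standing in for this patch's cached compileRegexp helper:

	package main

	import (
		"fmt"
		"regexp"
	)

	// matchingPatterns collects every patternProperties key whose regexp
	// matches the property name; invalid patterns are skipped, not fatal.
	func matchingPatterns(patterns []string, key string) []string {
		matched := make([]string, 0, len(patterns))
		for _, p := range patterns {
			re, err := regexp.Compile(p)
			if err != nil {
				continue
			}
			if re.MatchString(key) {
				matched = append(matched, p)
			}
		}
		return matched
	}

	func main() {
		fmt.Println(matchingPatterns([]string{`^x-`, `^meta\.`}, "x-rate-limit")) // [^x-]
	}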
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) + if matched || succeededOnce { + continue + } + + if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil { + continue } - // Valid cases: additionalProperties: true or undefined + + // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator + // AdditionalProperties as Schema + r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(val, key, r) } + // Valid cases: additionalProperties: true or undefined +} - createdFromDefaults := map[string]bool{} +func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) { + createdFromDefaults := map[string]struct{}{} // Property types: // - regular Property + pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties + defer func() { + pools.poolOfSchemas.RedeemSchema(pSchema) + }() + for pName := range o.Properties { - pSchema := o.Properties[pName] // one instance per iteration - rName := pName - if o.Path != "" { + *pSchema = o.Properties[pName] + var rName string + if o.Path == "" { + rName = pName + } else { rName = o.Path + "." + pName } // Recursively validates each property against its schema - if v, ok := val[pName]; ok { - r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v) - res.mergeForField(data.(map[string]interface{}), pName, r) - } else if pSchema.Default != nil { - // If a default value is defined, creates the property from defaults - // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. - createdFromDefaults[pName] = true - res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema) + v, ok := val[pName] + if ok { + r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v) + res.mergeForField(val, pName, r) + + continue } - } - // Check required properties - if len(o.Required) > 0 { - for _, k := range o.Required { - if v, ok := val[k]; !ok && !createdFromDefaults[k] { - res.AddErrors(errors.Required(o.Path+"."+k, o.In, v)) - continue + if pSchema.Default != nil { + // if a default value is defined, creates the property from defaults + // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. 
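For context, a self-contained sketch of the createdFromDefaults bookkeeping used just above: a property that is missing from the payload but carries a non-nil schema default is recorded as satisfied (Swagger, unlike plain JSON Schema, requires defaults to validate against their schema, which is what the surrounding validator enforces):

	package main

	import "fmt"

	func main() {
		payload := map[string]interface{}{"name": "swagger"}
		schemaDefaults := map[string]interface{}{"limit": 20} // per-property defaults

		createdFromDefaults := map[string]struct{}{}
		for prop, def := range schemaDefaults {
			if _, ok := payload[prop]; !ok && def != nil {
				createdFromDefaults[prop] = struct{}{}
			}
		}

		_, ok := createdFromDefaults["limit"]
		fmt.Println(ok) // true: "limit" counts as present via its default
	}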
+ createdFromDefaults[pName] = struct{}{} + if !o.Options.skipSchemataResult { + res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer + } + } + } - // Check patternProperties - // TODO: it looks like we have done that twice in many cases - for key, value := range val { - _, regularProperty := o.Properties[key] - matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res) - if !regularProperty && (matched /*|| succeededOnce*/) { - for _, pName := range patterns { - if v, ok := o.PatternProperties[pName]; ok { - r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) - res.mergeForField(data.(map[string]interface{}), key, r) - } - } + if len(o.Required) == 0 { + return + } + + // Check required properties + for _, k := range o.Required { + v, ok := val[k] + if ok { + continue + } + _, isCreatedFromDefaults := createdFromDefaults[k] + if isCreatedFromDefaults { + continue } + + res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v)) } - return res } // TODO: succeededOnce is not used anywhere func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) { + if len(o.PatternProperties) == 0 { + return false, false, nil + } + matched := false succeededOnce := false - var patterns []string + patterns := make([]string, 0, len(o.PatternProperties)) - for k, schema := range o.PatternProperties { - sch := schema - if match, _ := regexp.MatchString(k, key); match { - patterns = append(patterns, k) - matched = true - validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...) + schema := pools.poolOfSchemas.BorrowSchema() + defer func() { + pools.poolOfSchemas.RedeemSchema(schema) + }() - res := validator.Validate(value) - result.Merge(res) + for k := range o.PatternProperties { + re, err := compileRegexp(k) + if err != nil { + continue } - } - // BUG(fredbi): can't get to here. Should remove dead code (commented out). + match := re.MatchString(key) + if !match { + continue + } - // if succeededOnce { - // result.Inc() - // } + *schema = o.PatternProperties[k] + patterns = append(patterns, k) + matched = true + validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options) + + res := validator.Validate(value) + result.Merge(res) + } return matched, succeededOnce, patterns } + +func (o *objectValidator) redeem() { + pools.poolOfObjectValidators.RedeemValidator(o) +} diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index deeec2f2ec..cfe9b0660f 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -21,10 +21,29 @@ import "sync" // NOTE: other options might be needed, for example a go-swagger specific mode. type Opts struct { ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid + + // StrictPathParamUniqueness enables a strict validation of paths that include + // path parameters. When true, it will enforce that for each method, the path + // is unique, regardless of path parameters such that GET:/petstore/{id} and + // GET:/petstore/{pet} are considered duplicate paths. + // + // Consider disabling if path parameters can include slashes such as + // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and + // "shelve/*/book/*" respectively.
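As a usage illustration for the option documented above (a hedged sketch against the patched go-openapi/validate module; the field itself is declared just below):

	package main

	import (
		"fmt"

		"github.com/go-openapi/validate"
	)

	func main() {
		// relaxed configuration: parameterized paths that collide only by
		// placeholder name, e.g. GET:/v1/{shelve} vs GET:/v1/{book}, are tolerated
		opts := validate.Opts{
			ContinueOnErrors:          true,
			StrictPathParamUniqueness: false,
		}
		fmt.Printf("%+v\n", opts)
	}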
+ StrictPathParamUniqueness bool + SkipSchemataResult bool } var ( - defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors + defaultOpts = Opts{ + // default is to stop validation on errors + ContinueOnErrors: false, + + // StrictPathParamUniqueness is defaulted to true. This maintains existing + // behavior. + StrictPathParamUniqueness: true, + } + defaultOptsMutex = &sync.Mutex{} ) diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go new file mode 100644 index 0000000000..3ddce4dcc2 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -0,0 +1,366 @@ +//go:build !validatedebug + +package validate + +import ( + "sync" + + "github.com/go-openapi/spec" +) + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. + + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + }, + poolOfTypeValidators: typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + }, + } +} + +type ( + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. 
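The typed pools listed below all reduce to the same sync.Pool borrow/redeem lifecycle; a minimal, self-contained sketch (names hypothetical, mirroring the BorrowResult/RedeemResult pair further down):

	package main

	import (
		"fmt"
		"sync"
	)

	type result struct{ errors []error }

	// cleared resets recycled state before reuse, as BorrowResult does via Result.cleared().
	func (r *result) cleared() *result { r.errors = r.errors[:0]; return r }

	var pool = sync.Pool{New: func() any { return &result{} }}

	func borrowResult() *result  { return pool.Get().(*result).cleared() }
	func redeemResult(r *result) { pool.Put(r) } // at most once per borrow

	func main() {
		r := borrowResult()
		r.errors = append(r.errors, fmt.Errorf("boom"))
		redeemResult(r) // r must not be used after redemption
		fmt.Println(len(borrowResult().errors)) // 0: recycled objects come back cleared
	}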
+ poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + } + + objectValidatorsPool struct { + *sync.Pool + } + + sliceValidatorsPool struct { + *sync.Pool + } + + itemsValidatorsPool struct { + *sync.Pool + } + + basicCommonValidatorsPool struct { + *sync.Pool + } + + headerValidatorsPool struct { + *sync.Pool + } + + paramValidatorsPool struct { + *sync.Pool + } + + basicSliceValidatorsPool struct { + *sync.Pool + } + + numberValidatorsPool struct { + *sync.Pool + } + + stringValidatorsPool struct { + *sync.Pool + } + + schemaPropsValidatorsPool struct { + *sync.Pool + } + + formatValidatorsPool struct { + *sync.Pool + } + + typeValidatorsPool struct { + *sync.Pool + } + + schemasPool struct { + *sync.Pool + } + + resultsPool struct { + *sync.Pool + } +) + +func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { + return p.Get().(*SchemaValidator) +} + +func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.Put(s) +} + +func (p objectValidatorsPool) BorrowValidator() *objectValidator { + return p.Get().(*objectValidator) +} + +func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.Put(s) +} + +func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + return p.Get().(*schemaSliceValidator) +} + +func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.Put(s) +} + +func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { + return p.Get().(*itemsValidator) +} + +func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.Put(s) +} + +func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + return p.Get().(*basicCommonValidator) +} + +func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.Put(s) +} + +func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { + return p.Get().(*HeaderValidator) +} + +func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.Put(s) +} + +func (p paramValidatorsPool) BorrowValidator() *ParamValidator { + return p.Get().(*ParamValidator) +} + +func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.Put(s) +} + +func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + return p.Get().(*basicSliceValidator) +} + +func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.Put(s) +} + +func (p numberValidatorsPool) BorrowValidator() *numberValidator { + return p.Get().(*numberValidator) +} + +func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.Put(s) +} + +func (p stringValidatorsPool) BorrowValidator() *stringValidator { + return p.Get().(*stringValidator) +} + +func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.Put(s) +} + +func (p schemaPropsValidatorsPool) BorrowValidator() 
*schemaPropsValidator { + return p.Get().(*schemaPropsValidator) +} + +func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.Put(s) +} + +func (p formatValidatorsPool) BorrowValidator() *formatValidator { + return p.Get().(*formatValidator) +} + +func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { + p.Put(s) +} + +func (p typeValidatorsPool) BorrowValidator() *typeValidator { + return p.Get().(*typeValidator) +} + +func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.Put(s) +} + +func (p schemasPool) BorrowSchema() *spec.Schema { + return p.Get().(*spec.Schema) +} + +func (p schemasPool) RedeemSchema(s *spec.Schema) { + p.Put(s) +} + +func (p resultsPool) BorrowResult() *Result { + return p.Get().(*Result).cleared() +} + +func (p resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + return + } + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go new file mode 100644 index 0000000000..12949f02a7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools_debug.go @@ -0,0 +1,1012 @@ +//go:build validatedebug + +package validate + +import ( + "fmt" + "runtime" + "sync" + "testing" + + "github.com/go-openapi/spec" +) + +// This version of the pools is to be used for debugging and testing, with build tag "validatedebug". +// +// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can +// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected. + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. 
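A short, self-contained demonstration of the corruption described above, which the validatedebug build is designed to catch:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		pool := sync.Pool{New: func() any { return new(int) }}

		p := pool.Get().(*int)
		pool.Put(p)
		pool.Put(p) // double redeem: the pool now holds the same pointer twice

		a := pool.Get().(*int)
		b := pool.Get().(*int)
		fmt.Println(a == b) // may print true: two "distinct" borrows share state
	}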
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + debugMap: make(map[*SchemaValidator]status), + allocMap: make(map[*SchemaValidator]string), + redeemMap: make(map[*SchemaValidator]string), + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + debugMap: make(map[*objectValidator]status), + allocMap: make(map[*objectValidator]string), + redeemMap: make(map[*objectValidator]string), + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + debugMap: make(map[*schemaSliceValidator]status), + allocMap: make(map[*schemaSliceValidator]string), + redeemMap: make(map[*schemaSliceValidator]string), + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + debugMap: make(map[*itemsValidator]status), + allocMap: make(map[*itemsValidator]string), + redeemMap: make(map[*itemsValidator]string), + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + debugMap: make(map[*basicCommonValidator]status), + allocMap: make(map[*basicCommonValidator]string), + redeemMap: make(map[*basicCommonValidator]string), + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + debugMap: make(map[*HeaderValidator]status), + allocMap: make(map[*HeaderValidator]string), + redeemMap: make(map[*HeaderValidator]string), + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + debugMap: make(map[*ParamValidator]status), + allocMap: make(map[*ParamValidator]string), + redeemMap: make(map[*ParamValidator]string), + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + debugMap: make(map[*basicSliceValidator]status), + allocMap: make(map[*basicSliceValidator]string), + redeemMap: make(map[*basicSliceValidator]string), + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + debugMap: make(map[*numberValidator]status), + allocMap: make(map[*numberValidator]string), + redeemMap: make(map[*numberValidator]string), + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + debugMap: make(map[*stringValidator]status), + allocMap: make(map[*stringValidator]string), + redeemMap: make(map[*stringValidator]string), + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + debugMap: make(map[*schemaPropsValidator]status), + allocMap: make(map[*schemaPropsValidator]string), + redeemMap: make(map[*schemaPropsValidator]string), + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + debugMap: make(map[*formatValidator]status), + allocMap: make(map[*formatValidator]string), + redeemMap: make(map[*formatValidator]string), + }, + poolOfTypeValidators: 
typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + debugMap: make(map[*typeValidator]status), + allocMap: make(map[*typeValidator]string), + redeemMap: make(map[*typeValidator]string), + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + debugMap: make(map[*spec.Schema]status), + allocMap: make(map[*spec.Schema]string), + redeemMap: make(map[*spec.Schema]string), + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + debugMap: make(map[*Result]status), + allocMap: make(map[*Result]string), + redeemMap: make(map[*Result]string), + }, + } +} + +const ( + statusFresh status = iota + 1 + statusRecycled + statusRedeemed +) + +func (s status) String() string { + switch s { + case statusFresh: + return "fresh" + case statusRecycled: + return "recycled" + case statusRedeemed: + return "redeemed" + default: + panic(fmt.Errorf("invalid status: %d", s)) + } +} + +type ( + // Debug + status uint8 + + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. + poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + debugMap map[*SchemaValidator]status + allocMap map[*SchemaValidator]string + redeemMap map[*SchemaValidator]string + mx sync.Mutex + } + + objectValidatorsPool struct { + *sync.Pool + debugMap map[*objectValidator]status + allocMap map[*objectValidator]string + redeemMap map[*objectValidator]string + mx sync.Mutex + } + + sliceValidatorsPool struct { + *sync.Pool + debugMap map[*schemaSliceValidator]status + allocMap map[*schemaSliceValidator]string + redeemMap map[*schemaSliceValidator]string + mx sync.Mutex + } + + itemsValidatorsPool struct { + *sync.Pool + debugMap map[*itemsValidator]status + allocMap map[*itemsValidator]string + redeemMap map[*itemsValidator]string + mx sync.Mutex + } + + basicCommonValidatorsPool struct { + *sync.Pool + debugMap map[*basicCommonValidator]status + allocMap map[*basicCommonValidator]string + redeemMap map[*basicCommonValidator]string + mx sync.Mutex + } + + headerValidatorsPool struct { + *sync.Pool + debugMap map[*HeaderValidator]status + allocMap map[*HeaderValidator]string + redeemMap map[*HeaderValidator]string + mx sync.Mutex + } + + paramValidatorsPool struct { + *sync.Pool + debugMap map[*ParamValidator]status + allocMap map[*ParamValidator]string + redeemMap map[*ParamValidator]string + mx sync.Mutex + } + + basicSliceValidatorsPool struct { + *sync.Pool + debugMap map[*basicSliceValidator]status + allocMap map[*basicSliceValidator]string + redeemMap map[*basicSliceValidator]string + mx sync.Mutex + } + + numberValidatorsPool struct { + *sync.Pool + debugMap map[*numberValidator]status + allocMap map[*numberValidator]string + redeemMap 
map[*numberValidator]string + mx sync.Mutex + } + + stringValidatorsPool struct { + *sync.Pool + debugMap map[*stringValidator]status + allocMap map[*stringValidator]string + redeemMap map[*stringValidator]string + mx sync.Mutex + } + + schemaPropsValidatorsPool struct { + *sync.Pool + debugMap map[*schemaPropsValidator]status + allocMap map[*schemaPropsValidator]string + redeemMap map[*schemaPropsValidator]string + mx sync.Mutex + } + + formatValidatorsPool struct { + *sync.Pool + debugMap map[*formatValidator]status + allocMap map[*formatValidator]string + redeemMap map[*formatValidator]string + mx sync.Mutex + } + + typeValidatorsPool struct { + *sync.Pool + debugMap map[*typeValidator]status + allocMap map[*typeValidator]string + redeemMap map[*typeValidator]string + mx sync.Mutex + } + + schemasPool struct { + *sync.Pool + debugMap map[*spec.Schema]status + allocMap map[*spec.Schema]string + redeemMap map[*spec.Schema]string + mx sync.Mutex + } + + resultsPool struct { + *sync.Pool + debugMap map[*Result]status + allocMap map[*Result]string + redeemMap map[*Result]string + mx sync.Mutex + } +) + +func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator { + s := p.Get().(*SchemaValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *objectValidatorsPool) BorrowValidator() *objectValidator { + s := p.Get().(*objectValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled object should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed object should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed object should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + s := p.Get().(*schemaSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schemaSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer") + } + 
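+ // all lifecycle checks passed: mark the pointer redeemed, record the redeeming call site, then hand the object back to the embedded sync.Pool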
p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator { + s := p.Get().(*itemsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled itemsValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed itemsValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + s := p.Get().(*basicCommonValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicCommonValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicCommonValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator { + s := p.Get().(*HeaderValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled HeaderValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed header should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed header should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *paramValidatorsPool) BorrowValidator() *ParamValidator { + s := p.Get().(*ParamValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed param should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed param should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + s := p.Get().(*basicSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + 
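+ // first sighting of this pointer: it was freshly allocated by the pool's New func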
p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *numberValidatorsPool) BorrowValidator() *numberValidator { + s := p.Get().(*numberValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled number should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed number should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed number should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *stringValidatorsPool) BorrowValidator() *stringValidator { + s := p.Get().(*stringValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled string should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed string should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed string should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { + s := p.Get().(*schemaPropsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schemaProps should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaProps should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *formatValidatorsPool) BorrowValidator() *formatValidator { + s := p.Get().(*formatValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled format should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) { + 
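+ // same debug-build contract as the pools above: a borrowed pointer must be redeemed exactly once before it may be borrowed again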
p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed format should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed format should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *typeValidatorsPool) BorrowValidator() *typeValidator { + s := p.Get().(*typeValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled type should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed type should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeemed at: %s", x, p.redeemMap[s])) + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemasPool) BorrowSchema() *spec.Schema { + s := p.Get().(*spec.Schema) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled spec.Schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemasPool) RedeemSchema(s *spec.Schema) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed spec.Schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *resultsPool) BorrowResult() *Result { + s := p.Get().(*Result).cleared() + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled result should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + if len(s.Errors) > 0 || len(s.Warnings) > 0 { + panic("empty result should not mutate") + } + return + } + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed Result should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed Result should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *allPools) allIsRedeemed(t testing.TB) bool { + outcome := true + for k, v := range p.poolOfSchemaValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfObjectValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("sliceValidator should be redeemed. 
Allocated by: %s", p.poolOfSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfItemsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicCommonValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfHeaderValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfParamValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfNumberValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfStringValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemaPropsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfFormatValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfTypeValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemas.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfResults.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("result should be redeemed. Allocated by: %s", p.poolOfResults.allocMap[k]) + outcome = false + } + + return outcome +} + +func caller() string { + pc, _, _, _ := runtime.Caller(3) //nolint:dogsled + from, line := runtime.FuncForPC(pc).FileLine(pc) + + return fmt.Sprintf("%s:%d", from, line) +} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 8f5f935e5d..c80804a93d 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ b/vendor/github.com/go-openapi/validate/result.go @@ -15,7 +15,7 @@ package validate import ( - "fmt" + stderrors "errors" "reflect" "strings" @@ -23,6 +23,8 @@ import ( "github.com/go-openapi/spec" ) +var emptyResult = &Result{MatchCount: 1} + // Result represents a validation result set, composed of // errors and warnings. 
// @@ -50,8 +52,10 @@ type Result struct { // Schemata for slice items itemSchemata []itemSchemata - cachedFieldSchemta map[FieldKey][]*spec.Schema - cachedItemSchemata map[ItemKey][]*spec.Schema + cachedFieldSchemata map[FieldKey][]*spec.Schema + cachedItemSchemata map[ItemKey][]*spec.Schema + + wantsRedeemOnMerge bool } // FieldKey is a pair of an object and a field, usable as a key for a map. @@ -116,6 +120,9 @@ func (r *Result) Merge(others ...*Result) *Result { } r.mergeWithoutRootSchemata(other) r.rootObjectSchemata.Append(other.rootObjectSchemata) + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } } return r } @@ -132,10 +139,9 @@ func (r *Result) RootObjectSchemata() []*spec.Schema { } // FieldSchemata returns the schemata which apply to fields in objects. -// nolint: dupl func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { - if r.cachedFieldSchemta != nil { - return r.cachedFieldSchemta + if r.cachedFieldSchemata != nil { + return r.cachedFieldSchemata } ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata)) @@ -147,12 +153,12 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { ret[key] = append(ret[key], fs.schemata.multiple...) } } - r.cachedFieldSchemta = ret + r.cachedFieldSchemata = ret + return ret } // ItemSchemata returns the schemata which apply to items in slices. -// nolint: dupl func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { if r.cachedItemSchemata != nil { return r.cachedItemSchemata @@ -172,12 +178,13 @@ func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { } func (r *Result) resetCaches() { - r.cachedFieldSchemta = nil + r.cachedFieldSchemata = nil r.cachedItemSchemata = nil } // mergeForField merges other into r, assigning other's root schemata to the given Object and field name. -// nolint: unparam +// +//nolint:unparam func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result { if other == nil { return r @@ -188,18 +195,23 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other * if r.fieldSchemata == nil { r.fieldSchemata = make([]fieldSchemata, len(obj)) } + // clone other schemata, as other is about to be redeemed to the pool r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{ obj: obj, field: field, - schemata: other.rootObjectSchemata, + schemata: other.rootObjectSchemata.Clone(), }) } + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } return r } // mergeForSlice merges other into r, assigning other's root schemata to the given slice and index. -// nolint: unparam +// +//nolint:unparam func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result { if other == nil { return r @@ -210,29 +222,38 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul if r.itemSchemata == nil { r.itemSchemata = make([]itemSchemata, slice.Len()) } + // clone other schemata, as other is about to be redeemed to the pool r.itemSchemata = append(r.itemSchemata, itemSchemata{ slice: slice, index: i, - schemata: other.rootObjectSchemata, + schemata: other.rootObjectSchemata.Clone(), }) } + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + return r } // addRootObjectSchemata adds the given schemata for the root object of the result. -// The slice schemata might be reused. I.e. do not modify it after being added to a result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. 
func (r *Result) addRootObjectSchemata(s *spec.Schema) { - r.rootObjectSchemata.Append(schemata{one: s}) + clone := *s + r.rootObjectSchemata.Append(schemata{one: &clone}) } // addPropertySchemata adds the given schemata for the object and field. -// The slice schemata might be reused. I.e. do not modify it after being added to a result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) { if r.fieldSchemata == nil { r.fieldSchemata = make([]fieldSchemata, 0, len(obj)) } - r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}}) + clone := *schema + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}}) } /* @@ -255,17 +276,21 @@ func (r *Result) mergeWithoutRootSchemata(other *Result) { if other.fieldSchemata != nil { if r.fieldSchemata == nil { - r.fieldSchemata = other.fieldSchemata - } else { - r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...) + r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata)) + } + for _, field := range other.fieldSchemata { + field.schemata = field.schemata.Clone() + r.fieldSchemata = append(r.fieldSchemata, field) } } if other.itemSchemata != nil { if r.itemSchemata == nil { - r.itemSchemata = other.itemSchemata - } else { - r.itemSchemata = append(r.itemSchemata, other.itemSchemata...) + r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata)) + } + for _, field := range other.itemSchemata { + field.schemata = field.schemata.Clone() + r.itemSchemata = append(r.itemSchemata, field) } } } @@ -280,6 +305,9 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result { r.AddErrors(other.Errors...) r.AddErrors(other.Warnings...) r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } } } return r @@ -295,6 +323,9 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result { r.AddWarnings(other.Errors...) r.AddWarnings(other.Warnings...) r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } } } return r @@ -356,16 +387,21 @@ func (r *Result) keepRelevantErrors() *Result { strippedErrors := []error{} for _, e := range r.Errors { if strings.HasPrefix(e.Error(), "IMPORTANT!") { - strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) + strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } strippedWarnings := []error{} for _, e := range r.Warnings { if strings.HasPrefix(e.Error(), "IMPORTANT!") { - strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) + strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } - strippedResult := new(Result) + var strippedResult *Result + if r.wantsRedeemOnMerge { + strippedResult = pools.poolOfResults.BorrowResult() + } else { + strippedResult = new(Result) + } strippedResult.Errors = strippedErrors strippedResult.Warnings = strippedWarnings return strippedResult @@ -427,6 +463,27 @@ func (r *Result) AsError() error { return errors.CompositeValidationError(r.Errors...) } +func (r *Result) cleared() *Result { + // clear the Result to be reusable. Keep allocated capacity. 
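+ // slices are truncated in place so their backing arrays are reused, the cache maps are emptied key by key, and wantsRedeemOnMerge is set so that the next Merge returns this Result to the pool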
+ r.Errors = r.Errors[:0] + r.Warnings = r.Warnings[:0] + r.MatchCount = 0 + r.data = nil + r.rootObjectSchemata.one = nil + r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0] + r.fieldSchemata = r.fieldSchemata[:0] + r.itemSchemata = r.itemSchemata[:0] + for k := range r.cachedFieldSchemata { + delete(r.cachedFieldSchemata, k) + } + for k := range r.cachedItemSchemata { + delete(r.cachedItemSchemata, k) + } + r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another + + return r +} + // schemata is an arbitrary number of schemata. It makes a distinction between zero, // one and many schemata to avoid slice allocations. type schemata struct { @@ -453,7 +510,7 @@ func (s *schemata) Slice() []*spec.Schema { return s.multiple } -// appendSchemata appends the schemata in other to s. It mutated s in-place. +// appendSchemata appends the schemata in other to s. It mutates s in-place. func (s *schemata) Append(other schemata) { if other.one == nil && len(other.multiple) == 0 { return @@ -484,3 +541,23 @@ func (s *schemata) Append(other schemata) { } } } + +func (s schemata) Clone() schemata { + var clone schemata + + if s.one != nil { + clone.one = new(spec.Schema) + *clone.one = *s.one + } + + if len(s.multiple) > 0 { + clone.multiple = make([]*spec.Schema, len(s.multiple)) + for idx := 0; idx < len(s.multiple); idx++ { + sp := new(spec.Schema) + *sp = *s.multiple[idx] + clone.multiple[idx] = sp + } + } + + return clone +} diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index b817eb0ef3..db65264fd1 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -24,32 +24,32 @@ import ( "github.com/go-openapi/swag" ) -var ( - specSchemaType = reflect.TypeOf(&spec.Schema{}) - specParameterType = reflect.TypeOf(&spec.Parameter{}) - specHeaderType = reflect.TypeOf(&spec.Header{}) - // specItemsType = reflect.TypeOf(&spec.Items{}) -) - // SchemaValidator validates data against a JSON schema type SchemaValidator struct { Path string in string Schema *spec.Schema - validators []valueValidator + validators [8]valueValidator Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions } // AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. // // When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error { - res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data) + res := NewSchemaValidator(schema, nil, "", formats, + append(options, WithRecycleValidators(true), withRecycleResults(true))..., + ).Validate(data) + defer func() { + pools.poolOfResults.RedeemResult(res) + }() + + if res.HasErrors() { + return errors.CompositeValidationError(res.Errors...) + } + + return nil } @@ -57,6 +57,15 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr // // Panics if the provided schema is invalid. 
func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newSchemaValidator(schema, rootSchema, root, formats, opts) +} + +func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator { + if schema == nil { + return nil + } @@ -72,17 +81,26 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string panic(msg) } } - s := SchemaValidator{ - Path: root, - in: "body", - Schema: schema, - Root: rootSchema, - KnownFormats: formats, - Options: SchemaValidatorOptions{}} - for _, o := range options { - o(&s.Options) + + if opts == nil { + opts = new(SchemaValidatorOptions) } - s.validators = []valueValidator{ + + var s *SchemaValidator + if opts.recycleValidators { + s = pools.poolOfSchemaValidators.BorrowValidator() + } else { + s = new(SchemaValidator) + } + + s.Path = root + s.in = "body" + s.Schema = schema + s.Root = rootSchema + s.Options = opts + s.KnownFormats = formats + + s.validators = [8]valueValidator{ + s.typeValidator(), + s.schemaPropsValidator(), + s.stringValidator(), @@ -92,7 +110,8 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string s.commonValidator(), s.objectValidator(), } - return &s + + return s } // SetPath sets the path for this schema validator @@ -101,24 +120,46 @@ func (s *SchemaValidator) SetPath(path string) { } // Applies returns true when this schema validator applies -func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool { _, ok := source.(*spec.Schema) return ok } // Validate validates the data against the schema func (s *SchemaValidator) Validate(data interface{}) *Result { - result := &Result{data: data} if s == nil { - return result + return emptyResult } - if s.Schema != nil { + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() // one-time use validator + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + result.data = data + } else { + result = &Result{data: data} + } + + if s.Schema != nil && !s.Options.skipSchemataResult { result.addRootObjectSchemata(s.Schema) } if data == nil { + // early exit with minimal validation result.Merge(s.validators[0].Validate(data)) // type validator result.Merge(s.validators[6].Validate(data)) // common validator + + if s.Options.recycleValidators { + s.validators[0] = nil + s.validators[6] = nil + } + return result } @@ -147,6 +188,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if erri != nil { result.AddErrors(invalidTypeConversionMsg(s.Path, erri)) result.Inc() + return result } d = in @@ -155,6 +197,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if errf != nil { result.AddErrors(invalidTypeConversionMsg(s.Path, errf)) result.Inc() + return result } d = nf @@ -164,14 +207,26 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { kind = tpe.Kind() } - for _, v := range s.validators { + for idx, v := range s.validators { if !v.Applies(s.Schema, kind) { - debugLog("%T does not apply for %v", v, kind) + if s.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok { + 
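+ // hand the skipped validator's children back to their pools before redeeming the validator itself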
redeemableChildren.redeemChildren() + } + if redeemable, ok := v.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[idx] = nil // prevents further (unsafe) usage + } + continue } - err := v.Validate(d) - result.Merge(err) + result.Merge(v.Validate(d)) + if s.Options.recycleValidators { + s.validators[idx] = nil // prevents further (unsafe) usage + } result.Inc() } result.Inc() @@ -180,81 +235,120 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { } func (s *SchemaValidator) typeValidator() valueValidator { - return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path} + return newTypeValidator( + s.Path, + s.in, + s.Schema.Type, + s.Schema.Nullable, + s.Schema.Format, + s.Options, + ) } func (s *SchemaValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: s.Path, - In: s.in, - Enum: s.Schema.Enum, - } + return newBasicCommonValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.Enum, + s.Options, + ) } func (s *SchemaValidator) sliceValidator() valueValidator { - return &schemaSliceValidator{ - Path: s.Path, - In: s.in, - MaxItems: s.Schema.MaxItems, - MinItems: s.Schema.MinItems, - UniqueItems: s.Schema.UniqueItems, - AdditionalItems: s.Schema.AdditionalItems, - Items: s.Schema.Items, - Root: s.Root, - KnownFormats: s.KnownFormats, - Options: s.Options, - } + return newSliceValidator( + s.Path, + s.in, + s.Schema.MaxItems, + s.Schema.MinItems, + s.Schema.UniqueItems, + s.Schema.AdditionalItems, + s.Schema.Items, + s.Root, + s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: s.Path, - In: s.in, - Default: s.Schema.Default, - MultipleOf: s.Schema.MultipleOf, - Maximum: s.Schema.Maximum, - ExclusiveMaximum: s.Schema.ExclusiveMaximum, - Minimum: s.Schema.Minimum, - ExclusiveMinimum: s.Schema.ExclusiveMinimum, - } + return newNumberValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.MultipleOf, + s.Schema.Maximum, + s.Schema.ExclusiveMaximum, + s.Schema.Minimum, + s.Schema.ExclusiveMinimum, + "", + "", + s.Options, + ) } func (s *SchemaValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: s.Path, - In: s.in, - MaxLength: s.Schema.MaxLength, - MinLength: s.Schema.MinLength, - Pattern: s.Schema.Pattern, - } + return newStringValidator( + s.Path, + s.in, + nil, + false, + false, + s.Schema.MaxLength, + s.Schema.MinLength, + s.Schema.Pattern, + s.Options, + ) } func (s *SchemaValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: s.Path, - In: s.in, - Format: s.Schema.Format, - KnownFormats: s.KnownFormats, - } + return newFormatValidator( + s.Path, + s.in, + s.Schema.Format, + s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) schemaPropsValidator() valueValidator { sch := s.Schema - return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...) 
+ return newSchemaPropsValidator( + s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) objectValidator() valueValidator { - return &objectValidator{ - Path: s.Path, - In: s.in, - MaxProperties: s.Schema.MaxProperties, - MinProperties: s.Schema.MinProperties, - Required: s.Schema.Required, - Properties: s.Schema.Properties, - AdditionalProperties: s.Schema.AdditionalProperties, - PatternProperties: s.Schema.PatternProperties, - Root: s.Root, - KnownFormats: s.KnownFormats, - Options: s.Options, + return newObjectValidator( + s.Path, + s.in, + s.Schema.MaxProperties, + s.Schema.MinProperties, + s.Schema.Required, + s.Schema.Properties, + s.Schema.AdditionalProperties, + s.Schema.PatternProperties, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) redeem() { + pools.poolOfSchemaValidators.RedeemValidator(s) +} + +func (s *SchemaValidator) redeemChildren() { + for i, validator := range s.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[i] = nil // free up allocated children if not in pool } } diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go index 4b4879de8b..65eeebeaab 100644 --- a/vendor/github.com/go-openapi/validate/schema_option.go +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -18,6 +18,9 @@ package validate type SchemaValidatorOptions struct { EnableObjectArrayTypeCheck bool EnableArrayMustHaveItemsCheck bool + recycleValidators bool + recycleResult bool + skipSchemataResult bool } // Option sets optional rules for schema validation @@ -45,10 +48,36 @@ func SwaggerSchema(enable bool) Option { } } -// Options returns current options +// WithRecycleValidators saves memory allocations and makes validators +// available for a single use of Validate() only. +// +// When a validator is recycled, callers MUST NOT call the Validate() method twice. 
+func WithRecycleValidators(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleValidators = enable + } +} + +func withRecycleResults(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleResult = enable + } +} + +// WithSkipSchemataResult skips the deep audit payload stored in validation Result +func WithSkipSchemataResult(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.skipSchemataResult = enable + } +} + +// Options returns the current set of options func (svo SchemaValidatorOptions) Options() []Option { return []Option{ EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck), + WithRecycleValidators(svo.recycleValidators), + withRecycleResults(svo.recycleResult), + WithSkipSchemataResult(svo.skipSchemataResult), } } diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go index 9bac3d29fb..1ca379244d 100644 --- a/vendor/github.com/go-openapi/validate/schema_props.go +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -30,211 +30,327 @@ type schemaPropsValidator struct { AnyOf []spec.Schema Not *spec.Schema Dependencies spec.Dependencies - anyOfValidators []SchemaValidator - allOfValidators []SchemaValidator - oneOfValidators []SchemaValidator + anyOfValidators []*SchemaValidator + allOfValidators []*SchemaValidator + oneOfValidators []*SchemaValidator notValidator *SchemaValidator Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions } func (s *schemaPropsValidator) SetPath(path string) { s.Path = path } -func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator { - anyValidators := make([]SchemaValidator, 0, len(anyOf)) - for _, v := range anyOf { - v := v - anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...)) +func newSchemaPropsValidator( + path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, + opts *SchemaValidatorOptions) *schemaPropsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) } - allValidators := make([]SchemaValidator, 0, len(allOf)) - for _, v := range allOf { - v := v - allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + + anyValidators := make([]*SchemaValidator, 0, len(anyOf)) + for i := range anyOf { + anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts)) + } + allValidators := make([]*SchemaValidator, 0, len(allOf)) + for i := range allOf { + allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts)) } - oneValidators := make([]SchemaValidator, 0, len(oneOf)) - for _, v := range oneOf { - v := v - oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + oneValidators := make([]*SchemaValidator, 0, len(oneOf)) + for i := range oneOf { + oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts)) } var notValidator *SchemaValidator if not != nil { - notValidator = NewSchemaValidator(not, root, path, formats, options...) 
- } - - schOptions := &SchemaValidatorOptions{} - for _, o := range options { - o(schOptions) - } - return &schemaPropsValidator{ - Path: path, - In: in, - AllOf: allOf, - OneOf: oneOf, - AnyOf: anyOf, - Not: not, - Dependencies: deps, - anyOfValidators: anyValidators, - allOfValidators: allValidators, - oneOfValidators: oneValidators, - notValidator: notValidator, - Root: root, - KnownFormats: formats, - Options: *schOptions, + notValidator = newSchemaValidator(not, root, path, formats, opts) + } + + var s *schemaPropsValidator + if opts.recycleValidators { + s = pools.poolOfSchemaPropsValidators.BorrowValidator() + } else { + s = new(schemaPropsValidator) } + + s.Path = path + s.In = in + s.AllOf = allOf + s.OneOf = oneOf + s.AnyOf = anyOf + s.Not = not + s.Dependencies = deps + s.anyOfValidators = anyValidators + s.allOfValidators = allValidators + s.oneOfValidators = oneValidators + s.notValidator = notValidator + s.Root = root + s.KnownFormats = formats + s.Options = opts + + return s } -func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool { - r := reflect.TypeOf(source) == specSchemaType - debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) - return r +func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool { + _, isSchema := source.(*spec.Schema) + return isSchema } func (s *schemaPropsValidator) Validate(data interface{}) *Result { - mainResult := new(Result) + var mainResult *Result + if s.Options.recycleResult { + mainResult = pools.poolOfResults.BorrowResult() + } else { + mainResult = new(Result) + } // Intermediary error results // IMPORTANT! messages from underlying validators - keepResultAnyOf := new(Result) - keepResultOneOf := new(Result) - keepResultAllOf := new(Result) + var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() + + // results are redeemed when merged + }() + } - // Validates at least one in anyOf schemas - var firstSuccess *Result if len(s.anyOfValidators) > 0 { - var bestFailures *Result - succeededOnce := false - for _, anyOfSchema := range s.anyOfValidators { - result := anyOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultAnyOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - bestFailures = nil - succeededOnce = true - if firstSuccess == nil { - firstSuccess = result - } - keepResultAnyOf = new(Result) - break - } - // MatchCount is used to select errors from the schema with most positive checks - if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { - bestFailures = result + keepResultAnyOf = pools.poolOfResults.BorrowResult() + s.validateAnyOf(data, mainResult, keepResultAnyOf) + } + + if len(s.oneOfValidators) > 0 { + keepResultOneOf = pools.poolOfResults.BorrowResult() + s.validateOneOf(data, mainResult, keepResultOneOf) + } + + if len(s.allOfValidators) > 0 { + keepResultAllOf = pools.poolOfResults.BorrowResult() + s.validateAllOf(data, mainResult, keepResultAllOf) + } + + if s.notValidator != nil { + s.validateNot(data, mainResult) + } + + if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { + s.validateDependencies(data, mainResult) + } + + mainResult.Inc() + + // In the end we retain best failures for schema validation + // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). 
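+ // Merge also redeems each non-nil keepResult* buffer: BorrowResult flagged them with wantsRedeemOnMerge via cleared()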
+ return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) +} + +func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) { + // Validates at least one in anyOf schemas + var bestFailures *Result + + for i, anyOfSchema := range s.anyOfValidators { + result := anyOfSchema.Validate(data) + if s.Options.recycleValidators { + s.anyOfValidators[i] = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) } + + _ = keepResultAnyOf.cleared() + mainResult.Merge(result) + + return } - if !succeededOnce { - mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + // MatchCount is used to select errors from the schema with most positive checks + if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + bestFailures = result + + continue } - if bestFailures != nil { - mainResult.Merge(bestFailures) - } else if firstSuccess != nil { - mainResult.Merge(firstSuccess) + + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched } } + mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + mainResult.Merge(bestFailures) +} + +func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) { // Validates exactly one in oneOf schemas - if len(s.oneOfValidators) > 0 { - var bestFailures *Result - var firstSuccess *Result - validated := 0 - - for _, oneOfSchema := range s.oneOfValidators { - result := oneOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultOneOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - validated++ - bestFailures = nil - if firstSuccess == nil { - firstSuccess = result - } - keepResultOneOf = new(Result) - continue - } - // MatchCount is used to select errors from the schema with most positive checks - if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { - bestFailures = result - } + var ( + firstSuccess, bestFailures *Result + validated int + ) + + for i, oneOfSchema := range s.oneOfValidators { + result := oneOfSchema.Validate(data) + if s.Options.recycleValidators { + s.oneOfValidators[i] = nil } - if validated != 1 { - var additionalMsg string - if validated == 0 { - additionalMsg = "Found none valid" - } else { - additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated) - } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result - mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg)) - if bestFailures != nil { - mainResult.Merge(bestFailures) - } - } else if firstSuccess != nil { - mainResult.Merge(firstSuccess) - } - } + if result.IsValid() { + validated++ + _ = keepResultOneOf.cleared() - // Validates all of allOf schemas - if len(s.allOfValidators) > 0 { - validated := 0 - - for _, allOfSchema := range s.allOfValidators { - result := allOfSchema.Validate(data) - // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us - keepResultAllOf.Merge(result.keepRelevantErrors()) - // keepResultAllOf.Merge(result) - if result.IsValid() { - validated++ + if firstSuccess == nil { + firstSuccess = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched } - mainResult.Merge(result) + + continue } - if validated != len(s.allOfValidators) { - additionalMsg := "" - if validated == 0 { - additionalMsg = ". None validated" + // MatchCount is used to select errors from the schema with most positive checks + if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) } + bestFailures = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + } - mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg)) + switch validated { + case 0: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid")) + mainResult.Merge(bestFailures) + // firstSuccess is necessarily nil + case 1: + mainResult.Merge(firstSuccess) + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + default: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated))) + mainResult.Merge(bestFailures) + if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(firstSuccess) } } +} - if s.notValidator != nil { - result := s.notValidator.Validate(data) +func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) { + // Validates all of allOf schemas + var validated int + + for i, allOfSchema := range s.allOfValidators { + result := allOfSchema.Validate(data) + if s.Options.recycleValidators { + s.allOfValidators[i] = nil + } // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAllOf.Merge(result.keepRelevantErrors()) if result.IsValid() { - mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + validated++ } + mainResult.Merge(result) } - if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { - val := data.(map[string]interface{}) - for key := range val { - if dep, ok := s.Dependencies[key]; ok { + switch validated { + case 0: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated")) + case len(s.allOfValidators): + default: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, "")) + } +} - if dep.Schema != nil { - mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data)) - continue - } +func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) { + result := s.notValidator.Validate(data) + if s.Options.recycleValidators { + s.notValidator = nil + } // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us + if result.IsValid() { + mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + } + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } +} + +func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) { + val := data.(map[string]interface{}) + for key := range val { + dep, ok := s.Dependencies[key] + if !ok { + continue + } + + if dep.Schema != nil { + mainResult.Merge( + newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), + ) + continue + } - if len(dep.Property) > 0 { - for _, depKey := range dep.Property { - if _, ok := val[depKey]; !ok { - mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) - } - } + if len(dep.Property) > 0 { + for _, depKey := range dep.Property { + if _, ok := val[depKey]; !ok { + mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) } } } } +} - mainResult.Inc() - // In the end we retain best failures for schema validation - // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). - return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) +func (s *schemaPropsValidator) redeem() { + pools.poolOfSchemaPropsValidators.RedeemValidator(s) +} + +func (s *schemaPropsValidator) redeemChildren() { + for _, v := range s.anyOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.anyOfValidators = nil + + for _, v := range s.allOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.allOfValidators = nil + + for _, v := range s.oneOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.oneOfValidators = nil + + if s.notValidator != nil { + s.notValidator.redeemChildren() + s.notValidator.redeem() + s.notValidator = nil + } } diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go index aa429f5184..13bb02087d 100644 --- a/vendor/github.com/go-openapi/validate/slice_validator.go +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -32,7 +32,36 @@ type schemaSliceValidator struct { Items *spec.SchemaOrArray Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions +} + +func newSliceValidator(path, in string, + maxItems, minItems *int64, uniqueItems bool, + additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, + root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *schemaSliceValidator + if opts.recycleValidators { + v = pools.poolOfSliceValidators.BorrowValidator() + } else { + v = new(schemaSliceValidator) + } + + v.Path = path + v.In = in + v.MaxItems = maxItems + v.MinItems = minItems + v.UniqueItems = uniqueItems + v.AdditionalItems = additionalItems + v.Items = items + v.Root = root + v.KnownFormats = formats + v.Options = opts + + return v } func (s *schemaSliceValidator) SetPath(path string) { @@ -46,7 +75,18 @@ func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bo } func (s *schemaSliceValidator) Validate(data interface{}) *Result { - result := new(Result) + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } 
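+ // a borrowed result carries wantsRedeemOnMerge, so whichever result it is merged into upstream hands it back to the pool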
if data == nil { return result } @@ -54,8 +94,8 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { size := val.Len() if s.Items != nil && s.Items.Schema != nil { - validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options.Options()...) for i := 0; i < size; i++ { + validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options) validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i)) value := val.Index(i) result.mergeForSlice(val, i, validator.Validate(value.Interface())) @@ -66,10 +106,11 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { if s.Items != nil && len(s.Items.Schemas) > 0 { itemsSize = len(s.Items.Schemas) for i := 0; i < itemsSize; i++ { - validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) - if val.Len() <= i { + if size <= i { break } + + validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) } } @@ -79,7 +120,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { } if s.AdditionalItems.Schema != nil { for i := itemsSize; i < size-itemsSize+1; i++ { - validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) + validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) } } @@ -103,3 +144,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { result.Inc() return result } + +func (s *schemaSliceValidator) redeem() { + pools.poolOfSliceValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index dff01f00be..965452566e 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -15,6 +15,8 @@ package validate import ( + "bytes" + "encoding/gob" "encoding/json" "fmt" "sort" @@ -26,23 +28,23 @@ import ( "github.com/go-openapi/loads" "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // Spec validates an OpenAPI 2.0 specification document. // // Returns an error flattening in a single standard error, all validation messages. // -// - TODO: $ref should not have siblings -// - TODO: make sure documentation reflects all checks and warnings -// - TODO: check on discriminators -// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) -// - TODO: full list of unresolved refs -// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples -// - TODO: option to determine if we validate for go-swagger or in a more general context -// - TODO: check on required properties to support anyOf, allOf, oneOf +// - TODO: $ref should not have siblings +// - TODO: make sure documentation reflects all checks and warnings +// - TODO: check on discriminators +// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) 
+// - TODO: full list of unresolved refs +// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - TODO: option to determine if we validate for go-swagger or in a more general context +// - TODO: check on required properties to support anyOf, allOf, oneOf // // NOTE: SecurityScopes are maps: no need to check uniqueness -// func Spec(doc *loads.Document, formats strfmt.Registry) error { errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) if errs.HasErrors() { @@ -53,25 +55,38 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error { // SpecValidator validates a swagger 2.0 spec type SpecValidator struct { - schema *spec.Schema // swagger 2.0 schema - spec *loads.Document - analyzer *analysis.Spec - expanded *loads.Document - KnownFormats strfmt.Registry - Options Opts // validation options + schema *spec.Schema // swagger 2.0 schema + spec *loads.Document + analyzer *analysis.Spec + expanded *loads.Document + KnownFormats strfmt.Registry + Options Opts // validation options + schemaOptions *SchemaValidatorOptions } // NewSpecValidator creates a new swagger spec validator instance func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { + // schema options that apply to all called validators + schemaOptions := new(SchemaValidatorOptions) + for _, o := range []Option{ + SwaggerSchema(true), + WithRecycleValidators(true), + // withRecycleResults(true), + } { + o(schemaOptions) + } + return &SpecValidator{ - schema: schema, - KnownFormats: formats, - Options: defaultOpts, + schema: schema, + KnownFormats: formats, + Options: defaultOpts, + schemaOptions: schemaOptions, } } // Validate validates the swagger spec func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { + s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult var sd *loads.Document errs, warnings := new(Result), new(Result) @@ -85,11 +100,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { s.spec = sd s.analyzer = analysis.New(sd.Spec()) - // Swagger schema validator - schv := NewSchemaValidator(s.schema, nil, "", s.KnownFormats, SwaggerSchema(true)) - var obj interface{} - // Raw spec unmarshalling errors + var obj interface{} if err := json.Unmarshal(sd.Raw(), &obj); err != nil { // NOTE: under normal conditions, the *load.Document has been already unmarshalled // So this one is just a paranoid check on the behavior of the spec package @@ -103,6 +115,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { warnings.AddErrors(errs.Warnings...) 
}() + // Swagger schema validator + schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions) errs.Merge(schv.Validate(obj)) // error - // There may be a point in continuing to try and determine more accurate errors if !s.Options.ContinueOnErrors && errs.HasErrors() { @@ -130,13 +144,13 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } // Values provided as default MUST validate their schema - df := &defaultValidator{SpecValidator: s} + df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions} errs.Merge(df.Validate()) // Values provided as examples MUST validate their schema // Value provided as examples in a response without schema generate a warning // Known limitations: examples in responses for mime type not application/json are ignored (warning) - ex := &exampleValidator{SpecValidator: s} + ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions} errs.Merge(ex.Validate()) errs.Merge(s.validateNonEmptyPathParamNames()) @@ -148,22 +162,27 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } func (s *SpecValidator) validateNonEmptyPathParamNames() *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() if s.spec.Spec().Paths == nil { // There is no Paths object: error res.AddErrors(noValidPathMsg()) - } else { - if s.spec.Spec().Paths.Paths == nil { - // Paths may be empty: warning - res.AddWarnings(noValidPathMsg()) - } else { - for k := range s.spec.Spec().Paths.Paths { - if strings.Contains(k, "{}") { - res.AddErrors(emptyPathParameterMsg(k)) - } - } + + return res + } + + if s.spec.Spec().Paths.Paths == nil { + // Paths may be empty: warning + res.AddWarnings(noValidPathMsg()) + + return res + } + + for k := range s.spec.Spec().Paths.Paths { + if strings.Contains(k, "{}") { + res.AddErrors(emptyPathParameterMsg(k)) } } + return res } @@ -177,7 +196,7 @@ func (s *SpecValidator) validateDuplicateOperationIDs() *Result { // fallback on possible incomplete picture because of previous errors analyzer = s.analyzer } - res := new(Result) + res := pools.poolOfResults.BorrowResult() known := make(map[string]int) for _, v := range analyzer.OperationIDs() { if v != "" { @@ -199,7 +218,7 @@ type dupProp struct { func (s *SpecValidator) validateDuplicatePropertyNames() *Result { // definition can't declare a property that's already defined by one of its ancestors - res := new(Result) + res := pools.poolOfResults.BorrowResult() for k, sch := range s.spec.Spec().Definitions { if len(sch.AllOf) == 0 { continue @@ -248,7 +267,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, schn := nm schc := &sch - res := new(Result) + res := pools.poolOfResults.BorrowResult() for schc.Ref.String() != "" { // gather property names @@ -285,7 +304,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, } func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { - res := new(Result) + res := pools.poolOfResults.BorrowResult() if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. 
We should not be able to actually get there return nil, res @@ -335,7 +354,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno func (s *SpecValidator) validateItems() *Result { // validate parameter, items, schema and response objects for presence of item if type is array - res := new(Result) + res := pools.poolOfResults.BorrowResult() for method, pi := range s.analyzer.Operations() { for path, op := range pi { @@ -394,7 +413,7 @@ func (s *SpecValidator) validateItems() *Result { // Verifies constraints on array type func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() if !schema.Type.Contains(arrayType) { return res } @@ -418,7 +437,7 @@ func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID str func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result { // Each defined operation path parameters must correspond to a named element in the API's path pattern. // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.) - res := new(Result) + res := pools.poolOfResults.BorrowResult() for _, l := range fromPath { var matched bool for _, r := range fromOperation { @@ -456,7 +475,6 @@ func (s *SpecValidator) validateReferenced() *Result { return &res } -// nolint: dupl func (s *SpecValidator) validateReferencedParameters() *Result { // Each referenceable definition should have references. params := s.spec.Spec().Parameters @@ -475,14 +493,13 @@ func (s *SpecValidator) validateReferencedParameters() *Result { if len(expected) == 0 { return nil } - result := new(Result) + result := pools.poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedParamMsg(k)) } return result } -// nolint: dupl func (s *SpecValidator) validateReferencedResponses() *Result { // Each referenceable definition should have references. responses := s.spec.Spec().Responses @@ -501,14 +518,13 @@ func (s *SpecValidator) validateReferencedResponses() *Result { if len(expected) == 0 { return nil } - result := new(Result) + result := pools.poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedResponseMsg(k)) } return result } -// nolint: dupl func (s *SpecValidator) validateReferencedDefinitions() *Result { // Each referenceable definition must have references. defs := s.spec.Spec().Definitions @@ -537,7 +553,7 @@ func (s *SpecValidator) validateReferencedDefinitions() *Result { func (s *SpecValidator) validateRequiredDefinitions() *Result { // Each property listed in the required array must be defined in the properties of the model - res := new(Result) + res := pools.poolOfResults.BorrowResult() DEFINITIONS: for d, schema := range s.spec.Spec().Definitions { @@ -556,7 +572,7 @@ DEFINITIONS: func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result { // Takes care of recursive property definitions, which may be nested in additionalProperties schemas - res := new(Result) + res := pools.poolOfResults.BorrowResult() propertyMatch := false patternMatch := false additionalPropertiesMatch := false @@ -615,40 +631,42 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche func (s *SpecValidator) validateParameters() *Result { // - for each method, path is unique, regardless of path parameters // e.g. 
GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are - // considered duplicate paths + // considered duplicate paths, if StrictPathParamUniqueness is enabled. // - each parameter should have a unique `name` and `type` combination // - each operation should have only 1 parameter of type body // - there must be at most 1 parameter in body // - parameters with pattern property must specify valid patterns // - $ref in parameters must resolve // - path param must be required - res := new(Result) + res := pools.poolOfResults.BorrowResult() rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) for path, op := range pi { - pathToAdd := pathHelp.stripParametersInPath(path) + if s.Options.StrictPathParamUniqueness { + pathToAdd := pathHelp.stripParametersInPath(path) - // Warn on garbled path afer param stripping - if rexGarbledPathSegment.MatchString(pathToAdd) { - res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) - } + // Warn on garbled path afer param stripping + if rexGarbledPathSegment.MatchString(pathToAdd) { + res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) + } - // Check uniqueness of stripped paths - if _, found := methodPaths[method][pathToAdd]; found { + // Check uniqueness of stripped paths + if _, found := methodPaths[method][pathToAdd]; found { - // Sort names for stable, testable output - if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { - res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + // Sort names for stable, testable output + if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { + res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + } else { + res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) + } } else { - res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) - } - } else { - if _, found := methodPaths[method]; !found { - methodPaths[method] = map[string]string{} - } - methodPaths[method][pathToAdd] = path // Original non stripped path + if _, found := methodPaths[method]; !found { + methodPaths[method] = map[string]string{} + } + methodPaths[method][pathToAdd] = path // Original non stripped path + } } var bodyParams []string @@ -659,7 +677,23 @@ func (s *SpecValidator) validateParameters() *Result { // TODO: should be done after param expansion res.Merge(s.checkUniqueParams(path, method, op)) + // pick the root schema from the swagger specification which describes a parameter + origSchema, ok := s.schema.Definitions["parameter"] + if !ok { + panic("unexpected swagger schema: missing #/definitions/parameter") + } + // clone it once to avoid expanding a global schema (e.g. 
swagger spec) + paramSchema, err := deepCloneSchema(origSchema) + if err != nil { + panic(fmt.Errorf("can't clone schema: %v", err)) + } + for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation) + schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions) + obj := swag.ToDynamicJSON(pr) + res.Merge(schv.Validate(obj)) + // Validate pattern regexp for parameters with a Pattern property if _, err := compileRegexp(pr.Pattern); err != nil { res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern)) @@ -741,7 +775,7 @@ func (s *SpecValidator) validateParameters() *Result { func (s *SpecValidator) validateReferencesValid() *Result { // each reference must point to a valid object - res := new(Result) + res := pools.poolOfResults.BorrowResult() for _, r := range s.analyzer.AllRefs() { if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI res.AddErrors(invalidRefMsg(r.String())) @@ -767,7 +801,7 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio // However, there are some issues with such a factorization: // - analysis does not seem to fully expand params // - param keys may be altered by x-go-name - res := new(Result) + res := pools.poolOfResults.BorrowResult() pnames := make(map[string]struct{}) if op.Parameters != nil { // Safeguard @@ -802,3 +836,17 @@ func (s *SpecValidator) expandedAnalyzer() *analysis.Spec { } return s.analyzer } + +func deepCloneSchema(src spec.Schema) (spec.Schema, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return spec.Schema{}, err + } + + var dst spec.Schema + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return spec.Schema{}, err + } + + return dst, nil +} diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index b3757adddb..6d1f0f819c 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -187,6 +187,8 @@ const ( // UnusedResponseWarning ... 
UnusedResponseWarning = "response %q is not used anywhere" + + InvalidObject = "expected an object in %q.%s" ) // Additional error codes @@ -347,11 +349,15 @@ func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) err func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) } +func invalidObjectMsg(path, in string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) +} // disabled -// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { -// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) -// } +// +// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +// } func someParametersBrokenMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) } diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index 876467588f..f87abb3d56 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -25,11 +25,34 @@ import ( ) type typeValidator struct { + Path string + In string Type spec.StringOrArray Nullable bool Format string - In string - Path string + Options *SchemaValidatorOptions +} + +func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var t *typeValidator + if opts.recycleValidators { + t = pools.poolOfTypeValidators.BorrowValidator() + } else { + t = new(typeValidator) + } + + t.Path = path + t.In = in + t.Type = typ + t.Nullable = nullable + t.Format = format + t.Options = opts + + return t } func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { @@ -90,7 +113,7 @@ func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { default: val := reflect.ValueOf(data) tpe := val.Type() - switch tpe.Kind() { + switch tpe.Kind() { //nolint:exhaustive case reflect.Bool: return booleanType, "" case reflect.String: @@ -125,23 +148,33 @@ func (t *typeValidator) SetPath(path string) { t.Path = path } -func (t *typeValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (t *typeValidator) Applies(source interface{}, _ reflect.Kind) bool { // typeValidator applies to Schema, Parameter and Header objects - stpe := reflect.TypeOf(source) - r := (len(t.Type) > 0 || t.Format != "") && (stpe == specSchemaType || stpe == specParameterType || stpe == specHeaderType) - debugLog("type validator for %q applies %t for %T (kind: %v)\n", t.Path, r, source, kind) - return r + switch source.(type) { + case *spec.Schema: + case *spec.Parameter: + case *spec.Header: + default: + return false + } + + return (len(t.Type) > 0 || t.Format != "") } func (t *typeValidator) Validate(data interface{}) *Result { - result := new(Result) - result.Inc() + if t.Options.recycleValidators { + defer func() { + t.redeem() + }() + } + if data == nil { // nil or zero value for the passed structure require Type: null if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this - return 
errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) } - return result + + return emptyResult } // check if the type matches, should be used in every validator chain as first item @@ -151,8 +184,6 @@ func (t *typeValidator) Validate(data interface{}) *Result { // infer schema type (JSON) and format from passed data type schType, format := t.schemaInfoForType(data) - debugLog("path: %s, schType: %s, format: %s, expType: %s, expFmt: %s, kind: %s", t.Path, schType, format, t.Type, t.Format, val.Kind().String()) - // check numerical types // TODO: check unsigned ints // TODO: check json.Number (see schema.go) @@ -163,15 +194,20 @@ func (t *typeValidator) Validate(data interface{}) *Result { if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) { // TODO: test case - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) } if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) { - return result + return emptyResult } if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) { - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult) } - return result + + return emptyResult +} + +func (t *typeValidator) redeem() { + pools.poolOfTypeValidators.RedeemValidator(t) } diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index 38cdb9bb6c..c083aecc9d 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -39,20 +39,31 @@ type itemsValidator struct { root interface{} path string in string - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } -func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry) *itemsValidator { - iv := &itemsValidator{path: path, in: in, items: items, root: root, KnownFormats: formats} - iv.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{items.Type}), - Nullable: items.Nullable, - Format: items.Format, - In: in, - Path: path, - }, +func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var iv *itemsValidator + if opts.recycleValidators { + iv = pools.poolOfItemsValidators.BorrowValidator() + } else { + iv = new(itemsValidator) + } + + iv.path = path + iv.in = in + iv.items = items + iv.root = root + iv.KnownFormats = formats + iv.Options = opts + iv.validators = [6]valueValidator{ + iv.typeValidator(), iv.stringValidator(), iv.formatValidator(), iv.numberValidator(), @@ -63,77 +74,152 @@ func newItemsValidator(path, in string, items *spec.Items, root interface{}, for } func (i *itemsValidator) Validate(index int, data interface{}) *Result { + if i.Options.recycleValidators { + defer func() { + i.redeemChildren() + 
i.redeem() + }() + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() - mainResult := new(Result) + var result *Result + if i.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + path := fmt.Sprintf("%s.%d", i.path, index) - for _, validator := range i.validators { + for idx, validator := range i.validators { + if !validator.Applies(i.root, kind) { + if i.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + validator.SetPath(path) - if validator.Applies(i.root, kind) { - result := validator.Validate(data) - mainResult.Merge(result) - mainResult.Inc() - if result != nil && result.HasErrors() { - return mainResult + err := validator.Validate(data) + if i.Options.recycleValidators { + i.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + result.Inc() + if err.HasErrors() { + result.Merge(err) + + break } + + result.Merge(err) } } - return mainResult + + return result +} + +func (i *itemsValidator) typeValidator() valueValidator { + return newTypeValidator( + i.path, + i.in, + spec.StringOrArray([]string{i.items.Type}), + i.items.Nullable, + i.items.Format, + i.Options, + ) } func (i *itemsValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - In: i.in, - Default: i.items.Default, - Enum: i.items.Enum, - } + return newBasicCommonValidator( + "", + i.in, + i.items.Default, + i.items.Enum, + i.Options, + ) } func (i *itemsValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - In: i.in, - Default: i.items.Default, - MaxItems: i.items.MaxItems, - MinItems: i.items.MinItems, - UniqueItems: i.items.UniqueItems, - Source: i.root, - Items: i.items.Items, - KnownFormats: i.KnownFormats, - } + return newBasicSliceValidator( + "", + i.in, + i.items.Default, + i.items.MaxItems, + i.items.MinItems, + i.items.UniqueItems, + i.items.Items, + i.root, + i.KnownFormats, + i.Options, + ) } func (i *itemsValidator) numberValidator() valueValidator { - return &numberValidator{ - In: i.in, - Default: i.items.Default, - MultipleOf: i.items.MultipleOf, - Maximum: i.items.Maximum, - ExclusiveMaximum: i.items.ExclusiveMaximum, - Minimum: i.items.Minimum, - ExclusiveMinimum: i.items.ExclusiveMinimum, - Type: i.items.Type, - Format: i.items.Format, - } + return newNumberValidator( + "", + i.in, + i.items.Default, + i.items.MultipleOf, + i.items.Maximum, + i.items.ExclusiveMaximum, + i.items.Minimum, + i.items.ExclusiveMinimum, + i.items.Type, + i.items.Format, + i.Options, + ) } func (i *itemsValidator) stringValidator() valueValidator { - return &stringValidator{ - In: i.in, - Default: i.items.Default, - MaxLength: i.items.MaxLength, - MinLength: i.items.MinLength, - Pattern: i.items.Pattern, - AllowEmptyValue: false, - } + return newStringValidator( + "", + i.in, + i.items.Default, + false, // Required + false, // AllowEmpty + i.items.MaxLength, + i.items.MinLength, + i.items.Pattern, + i.Options, + ) } func (i *itemsValidator) formatValidator() valueValidator { - return &formatValidator{ - In: i.in, - //Default: i.items.Default, - Format: i.items.Format, - KnownFormats: i.KnownFormats, + return newFormatValidator( + "", + i.in, + i.items.Format, + i.KnownFormats, 
+ i.Options, + ) +} + +func (i *itemsValidator) redeem() { + pools.poolOfItemsValidators.RedeemValidator(i) +} + +func (i *itemsValidator) redeemChildren() { + for idx, validator := range i.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // free up allocated children if not in pool } } @@ -142,265 +228,501 @@ type basicCommonValidator struct { In string Default interface{} Enum []interface{} + Options *SchemaValidatorOptions +} + +func newBasicCommonValidator(path, in string, def interface{}, enum []interface{}, opts *SchemaValidatorOptions) *basicCommonValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var b *basicCommonValidator + if opts.recycleValidators { + b = pools.poolOfBasicCommonValidators.BorrowValidator() + } else { + b = new(basicCommonValidator) + } + + b.Path = path + b.In = in + b.Default = def + b.Enum = enum + b.Options = opts + + return b } func (b *basicCommonValidator) SetPath(path string) { b.Path = path } -func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Header: return true + default: + return false } - return false } func (b *basicCommonValidator) Validate(data interface{}) (res *Result) { - if len(b.Enum) > 0 { - for _, enumValue := range b.Enum { - actualType := reflect.TypeOf(enumValue) - if actualType != nil { // Safeguard - expectedValue := reflect.ValueOf(data) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { - return nil - } - } - } + if b.Options.recycleValidators { + defer func() { + b.redeem() + }() + } + + if len(b.Enum) == 0 { + return nil + } + + for _, enumValue := range b.Enum { + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard + continue + } + + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && + expectedValue.Type().ConvertibleTo(actualType) && + reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil } - return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum)) } - return nil + + return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult) +} + +func (b *basicCommonValidator) redeem() { + pools.poolOfBasicCommonValidators.RedeemValidator(b) } // A HeaderValidator has very limited subset of validations to apply type HeaderValidator struct { name string header *spec.Header - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } // NewHeaderValidator creates a new header validator object -func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry) *HeaderValidator { - p := &HeaderValidator{name: name, header: header, KnownFormats: formats} - p.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{header.Type}), - Nullable: header.Nullable, - Format: header.Format, - In: "header", - Path: name, - }, +func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator { + opts := 
new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newHeaderValidator(name, header, formats, opts) +} + +func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *HeaderValidator + if opts.recycleValidators { + p = pools.poolOfHeaderValidators.BorrowValidator() + } else { + p = new(HeaderValidator) + } + + p.name = name + p.header = header + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + name, + "header", + spec.StringOrArray([]string{header.Type}), + header.Nullable, + header.Format, + p.Options, + ), p.stringValidator(), p.formatValidator(), p.numberValidator(), p.sliceValidator(), p.commonValidator(), } + return p } // Validate the value of the header against its schema func (p *HeaderValidator) Validate(data interface{}) *Result { - result := new(Result) + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() - for _, validator := range p.validators { - if validator.Applies(p.header, kind) { - if err := validator.Validate(data); err != nil { - result.Merge(err) - if err.HasErrors() { - return result + for idx, validator := range p.validators { + if !validator.Applies(p.header, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() } + p.validators[idx] = nil // prevents further (unsafe) usage } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) } } - return nil + + return result } func (p *HeaderValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - Enum: p.header.Enum, - } + return newBasicCommonValidator( + p.name, + "response", + p.header.Default, + p.header.Enum, + p.Options, + ) } func (p *HeaderValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - MaxItems: p.header.MaxItems, - MinItems: p.header.MinItems, - UniqueItems: p.header.UniqueItems, - Items: p.header.Items, - Source: p.header, - KnownFormats: p.KnownFormats, - } + return newBasicSliceValidator( + p.name, + "response", + p.header.Default, + p.header.MaxItems, + p.header.MinItems, + p.header.UniqueItems, + p.header.Items, + p.header, + p.KnownFormats, + p.Options, + ) } func (p *HeaderValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - MultipleOf: p.header.MultipleOf, - Maximum: p.header.Maximum, - ExclusiveMaximum: p.header.ExclusiveMaximum, - Minimum: p.header.Minimum, - ExclusiveMinimum: p.header.ExclusiveMinimum, - Type: p.header.Type, - Format: p.header.Format, - } + return newNumberValidator( + p.name, + "response", + 
p.header.Default, + p.header.MultipleOf, + p.header.Maximum, + p.header.ExclusiveMaximum, + p.header.Minimum, + p.header.ExclusiveMinimum, + p.header.Type, + p.header.Format, + p.Options, + ) } func (p *HeaderValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - Required: true, - MaxLength: p.header.MaxLength, - MinLength: p.header.MinLength, - Pattern: p.header.Pattern, - AllowEmptyValue: false, - } + return newStringValidator( + p.name, + "response", + p.header.Default, + true, + false, + p.header.MaxLength, + p.header.MinLength, + p.header.Pattern, + p.Options, + ) } func (p *HeaderValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: p.name, - In: "response", - //Default: p.header.Default, - Format: p.header.Format, - KnownFormats: p.KnownFormats, + return newFormatValidator( + p.name, + "response", + p.header.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) redeem() { + pools.poolOfHeaderValidators.RedeemValidator(p) +} + +func (p *HeaderValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool } } // A ParamValidator has very limited subset of validations to apply type ParamValidator struct { param *spec.Parameter - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } // NewParamValidator creates a new param validator object -func NewParamValidator(param *spec.Parameter, formats strfmt.Registry) *ParamValidator { - p := &ParamValidator{param: param, KnownFormats: formats} - p.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{param.Type}), - Nullable: param.Nullable, - Format: param.Format, - In: param.In, - Path: param.Name, - }, +func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newParamValidator(param, formats, opts) +} + +func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *ParamValidator + if opts.recycleValidators { + p = pools.poolOfParamValidators.BorrowValidator() + } else { + p = new(ParamValidator) + } + + p.param = param + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + param.Name, + param.In, + spec.StringOrArray([]string{param.Type}), + param.Nullable, + param.Format, + p.Options, + ), p.stringValidator(), p.formatValidator(), p.numberValidator(), p.sliceValidator(), p.commonValidator(), } + return p } // Validate the data against the description of the parameter func (p *ParamValidator) Validate(data interface{}) *Result { - result := new(Result) + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + // 
TODO: validate type - for _, validator := range p.validators { - if validator.Applies(p.param, kind) { - if err := validator.Validate(data); err != nil { - result.Merge(err) - if err.HasErrors() { - return result + for idx, validator := range p.validators { + if !validator.Applies(p.param, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() } + p.validators[idx] = nil // prevents further (unsafe) usage } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) } } - return nil + + return result } func (p *ParamValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - Enum: p.param.Enum, - } + return newBasicCommonValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Enum, + p.Options, + ) } func (p *ParamValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - MaxItems: p.param.MaxItems, - MinItems: p.param.MinItems, - UniqueItems: p.param.UniqueItems, - Items: p.param.Items, - Source: p.param, - KnownFormats: p.KnownFormats, - } + return newBasicSliceValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MaxItems, + p.param.MinItems, + p.param.UniqueItems, + p.param.Items, + p.param, + p.KnownFormats, + p.Options, + ) } func (p *ParamValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - MultipleOf: p.param.MultipleOf, - Maximum: p.param.Maximum, - ExclusiveMaximum: p.param.ExclusiveMaximum, - Minimum: p.param.Minimum, - ExclusiveMinimum: p.param.ExclusiveMinimum, - Type: p.param.Type, - Format: p.param.Format, - } + return newNumberValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MultipleOf, + p.param.Maximum, + p.param.ExclusiveMaximum, + p.param.Minimum, + p.param.ExclusiveMinimum, + p.param.Type, + p.param.Format, + p.Options, + ) } func (p *ParamValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - AllowEmptyValue: p.param.AllowEmptyValue, - Required: p.param.Required, - MaxLength: p.param.MaxLength, - MinLength: p.param.MinLength, - Pattern: p.param.Pattern, - } + return newStringValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Required, + p.param.AllowEmptyValue, + p.param.MaxLength, + p.param.MinLength, + p.param.Pattern, + p.Options, + ) } func (p *ParamValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: p.param.Name, - In: p.param.In, - //Default: p.param.Default, - Format: p.param.Format, - KnownFormats: p.KnownFormats, + return newFormatValidator( + p.param.Name, + p.param.In, + p.param.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) redeem() { + pools.poolOfParamValidators.RedeemValidator(p) +} + +func (p *ParamValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := 
validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool } } type basicSliceValidator struct { - Path string - In string - Default interface{} - MaxItems *int64 - MinItems *int64 - UniqueItems bool - Items *spec.Items - Source interface{} - itemsValidator *itemsValidator - KnownFormats strfmt.Registry + Path string + In string + Default interface{} + MaxItems *int64 + MinItems *int64 + UniqueItems bool + Items *spec.Items + Source interface{} + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newBasicSliceValidator( + path, in string, + def interface{}, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, + source interface{}, formats strfmt.Registry, + opts *SchemaValidatorOptions) *basicSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *basicSliceValidator + if opts.recycleValidators { + s = pools.poolOfBasicSliceValidators.BorrowValidator() + } else { + s = new(basicSliceValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.MaxItems = maxItems + s.MinItems = minItems + s.UniqueItems = uniqueItems + s.Items = items + s.Source = source + s.KnownFormats = formats + s.Options = opts + + return s } func (s *basicSliceValidator) SetPath(path string) { @@ -411,60 +733,61 @@ func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) boo switch source.(type) { case *spec.Parameter, *spec.Items, *spec.Header: return kind == reflect.Slice + default: + return false } - return false } func (s *basicSliceValidator) Validate(data interface{}) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } val := reflect.ValueOf(data) size := int64(val.Len()) if s.MinItems != nil { if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MaxItems != nil { if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.UniqueItems { if err := UniqueItems(s.Path, s.In, data); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } - if s.itemsValidator == nil && s.Items != nil { - s.itemsValidator = newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats) + if s.Items == nil { + return nil } - if s.itemsValidator != nil { - for i := 0; i < int(size); i++ { - ele := val.Index(i) - if err := s.itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() { + for i := 0; i < int(size); i++ { + itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options) + ele := val.Index(i) + if err := itemsValidator.Validate(i, ele.Interface()); err != nil { + if err.HasErrors() { return err } + if err.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(err) + } } } + return nil } -/* unused -func (s *basicSliceValidator) hasDuplicates(value reflect.Value, size int) bool { - dict := make(map[interface{}]struct{}) - for i := 0; i < size; i++ { - ele := value.Index(i) - if _, ok := dict[ele.Interface()]; ok { - return true - } - dict[ele.Interface()] = struct{}{} - } - return false +func (s *basicSliceValidator) redeem() { + pools.poolOfBasicSliceValidators.RedeemValidator(s) } -*/ type numberValidator struct { Path 
string @@ -476,8 +799,40 @@ type numberValidator struct { Minimum *float64 ExclusiveMinimum bool // Allows for more accurate behavior regarding integers - Type string - Format string + Type string + Format string + Options *SchemaValidatorOptions +} + +func newNumberValidator( + path, in string, def interface{}, + multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, + typ, format string, + opts *SchemaValidatorOptions) *numberValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var n *numberValidator + if opts.recycleValidators { + n = pools.poolOfNumberValidators.BorrowValidator() + } else { + n = new(numberValidator) + } + + n.Path = path + n.In = in + n.Default = def + n.MultipleOf = multipleOf + n.Maximum = maximum + n.ExclusiveMaximum = exclusiveMaximum + n.Minimum = minimum + n.ExclusiveMinimum = exclusiveMinimum + n.Type = typ + n.Format = format + n.Options = opts + + return n } func (n *numberValidator) SetPath(path string) { @@ -489,12 +844,10 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: isInt := kind >= reflect.Int && kind <= reflect.Uint64 isFloat := kind == reflect.Float32 || kind == reflect.Float64 - r := isInt || isFloat - debugLog("schema props validator for %q applies %t for %T (kind: %v) isInt=%t, isFloat=%t\n", n.Path, r, source, kind, isInt, isFloat) - return r + return isInt || isFloat + default: + return false } - debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", n.Path, false, source, kind) - return false } // Validate provides a validator for generic JSON numbers, @@ -519,11 +872,18 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { // // TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) func (n *numberValidator) Validate(val interface{}) *Result { - res := new(Result) + if n.Options.recycleValidators { + defer func() { + n.redeem() + }() + } - resMultiple := new(Result) - resMinimum := new(Result) - resMaximum := new(Result) + var res, resMultiple, resMinimum, resMaximum *Result + if n.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } // Used only to attempt to validate constraint on value, // even though value or constraint specified do not match type and format @@ -533,68 +893,106 @@ func (n *numberValidator) Validate(val interface{}) *Result { res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path)) if n.MultipleOf != nil { + resMultiple = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? 
resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path)) if resMultiple.IsValid() { // Constraint validated with compatible types if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil { - resMultiple.Merge(errorHelp.sErr(err)) + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil { - resMultiple.Merge(errorHelp.sErr(err)) + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } - // nolint: dupl if n.Maximum != nil { + resMaximum = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) if resMaximum.IsValid() { // Constraint validated with compatible types if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil { - resMaximum.Merge(errorHelp.sErr(err)) + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil { - resMaximum.Merge(errorHelp.sErr(err)) + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } - // nolint: dupl if n.Minimum != nil { + resMinimum = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) if resMinimum.IsValid() { // Constraint validated with compatible types if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil { - resMinimum.Merge(errorHelp.sErr(err)) + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil { - resMinimum.Merge(errorHelp.sErr(err)) + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } res.Merge(resMultiple, resMinimum, resMaximum) res.Inc() + return res } +func (n *numberValidator) redeem() { + pools.poolOfNumberValidators.RedeemValidator(n) +} + type stringValidator struct { + Path string + In string Default interface{} Required bool AllowEmptyValue bool MaxLength *int64 MinLength *int64 Pattern string - Path string - In string + Options *SchemaValidatorOptions +} + +func newStringValidator( + path, in string, + def interface{}, required, allowEmpty bool, maxLength, minLength *int64, pattern string, + opts *SchemaValidatorOptions) *stringValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *stringValidator + if opts.recycleValidators { + s = pools.poolOfStringValidators.BorrowValidator() + } else { + s = new(stringValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.Required = required + s.AllowEmptyValue = allowEmpty + s.MaxLength = maxLength + s.MinLength = minLength + s.Pattern = pattern + s.Options = opts + + return s } func (s *stringValidator) SetPath(path string) { @@ -604,42 +1002,50 @@ func (s *stringValidator) SetPath(path string) { func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Items, 
*spec.Header: - r := kind == reflect.String - debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) - return r + return kind == reflect.String + default: + return false } - debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, false, source, kind) - return false } func (s *stringValidator) Validate(val interface{}) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + data, ok := val.(string) if !ok { - return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val)) + return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult) } if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") { if err := RequiredString(s.Path, s.In, data); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MaxLength != nil { if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MinLength != nil { if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.Pattern != "" { if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } return nil } + +func (s *stringValidator) redeem() { + pools.poolOfStringValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go index e7ad8c1033..5f6f5ee61e 100644 --- a/vendor/github.com/go-openapi/validate/values.go +++ b/vendor/github.com/go-openapi/validate/values.go @@ -120,7 +120,7 @@ func UniqueItems(path, in string, data interface{}) *errors.Validation { // MinLength validates a string for minimum length func MinLength(path, in, data string, minLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen < minLength { return errors.TooShort(path, in, minLength, data) } @@ -129,7 +129,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation { // MaxLength validates a string for maximum length func MaxLength(path, in, data string, maxLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen > maxLength { return errors.TooLong(path, in, maxLength, data) } @@ -315,7 +315,7 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MaximumInt(path, in, value, int64(max), exclusive) @@ -345,7 +345,7 @@ func MaximumNativeType(path, in string, val interface{}, max float64, exclusive // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, 
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MinimumInt(path, in, value, int64(min), exclusive) @@ -375,7 +375,7 @@ func MinimumNativeType(path, in string, val interface{}, min float64, exclusive // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MultipleOfInt(path, in, value, int64(multipleOf)) @@ -399,7 +399,7 @@ func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path st // What is the string representation of val var stringRep string - switch kind { + switch kind { //nolint:exhaustive case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: stringRep = swag.FormatUint64(valueHelp.asUint64(val)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 0a89497999..13be7ec6a3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,8 @@ +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + ## 2.17.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc3..0e6ae1f290 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 5dd0140cd3..52cc3abc8f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.2" +const VERSION = "2.17.3" diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go index d38d707db3..38f80d5ae6 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go +++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -338,7 +338,7 @@ func (e Extensions) Render() ([]pkix.Extension, error) { return exts, nil } -func parseExtensions(ext []pkix.Extension) (Extensions, error) { +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { out := Extensions{} for _, e := range ext { diff --git 
a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go index 2263abd785..93dce8715a 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -39,8 +39,7 @@ import ( type CoseV001Schema struct { // data - // Required: true - Data *CoseV001SchemaData `json:"data"` + Data *CoseV001SchemaData `json:"data,omitempty"` // The COSE Sign1 Message // Format: byte @@ -71,9 +70,8 @@ func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { } func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err + if swag.IsZero(m.Data) { // not required + return nil } if m.Data != nil { @@ -117,6 +115,10 @@ func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt if m.Data != nil { + if swag.IsZero(m.Data) { // not required + return nil + } + if err := m.Data.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go index f8bf233ed7..3b906ae29e 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -277,10 +277,10 @@ type HashedrekordV001SchemaDataHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: [sha256 sha384 sha512] Algorithm *string `json:"algorithm"` - // The hash value for the content + // The hash value for the content, as represented by a lower case hexadecimal string // Required: true Value *string `json:"value"` } @@ -307,7 +307,7 @@ var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -319,6 +319,12 @@ const ( // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" + + // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384" + HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" + + // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512" + HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" ) // prop value enum diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go index ec271c1740..ee32ded414 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go @@ -95,7 +95,7 @@ type LogEntryAnon struct { // Required: true Body interface{} `json:"body"` - // integrated time + // The time the entry was added to the log as a Unix timestamp in seconds // Required: true IntegratedTime *int64 `json:"integratedTime"` diff --git a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go index 94dc68c66a..bf29f24901 100644 --- 
a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go @@ -23,13 +23,12 @@ import ( "fmt" "strconv" "strings" - "time" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" ) -// heavily borrowed from https://github.com/google/trillian-examples/blob/master/formats/log/checkpoint.go +// heavily borrowed from https://github.com/transparency-dev/formats/blob/main/log/checkpoint.go type Checkpoint struct { // Origin is the unique identifier/version string @@ -145,27 +144,6 @@ func (r *SignedCheckpoint) UnmarshalText(data []byte) error { return nil } -func (r *SignedCheckpoint) SetTimestamp(timestamp uint64) { - var ts uint64 - for i, val := range r.OtherContent { - if n, _ := fmt.Fscanf(strings.NewReader(val), "Timestamp: %d", &ts); n == 1 { - r.OtherContent = append(r.OtherContent[:i], r.OtherContent[i+1:]...) - } - } - r.OtherContent = append(r.OtherContent, fmt.Sprintf("Timestamp: %d", timestamp)) - r.SignedNote = SignedNote{Note: string(r.Checkpoint.String())} -} - -func (r *SignedCheckpoint) GetTimestamp() uint64 { - var ts uint64 - for _, val := range r.OtherContent { - if n, _ := fmt.Fscanf(strings.NewReader(val), "Timestamp: %d", &ts); n == 1 { - break - } - } - return ts -} - // CreateAndSignCheckpoint creates a signed checkpoint as a commitment to the current root hash func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, treeSize uint64, rootHash []byte, signer signature.Signer) ([]byte, error) { sth, err := CreateSignedCheckpoint(Checkpoint{ @@ -174,15 +152,14 @@ func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, Hash: rootHash, }) if err != nil { - return nil, fmt.Errorf("error creating checkpoint: %v", err) + return nil, fmt.Errorf("error creating checkpoint: %w", err) } - sth.SetTimestamp(uint64(time.Now().UnixNano())) if _, err := sth.Sign(hostname, signer, options.WithContext(ctx)); err != nil { - return nil, fmt.Errorf("error signing checkpoint: %v", err) + return nil, fmt.Errorf("error signing checkpoint: %w", err) } scBytes, err := sth.SignedNote.MarshalText() if err != nil { - return nil, fmt.Errorf("error marshalling checkpoint: %v", err) + return nil, fmt.Errorf("error marshalling checkpoint: %w", err) } return scBytes, nil } diff --git a/vendor/github.com/sigstore/rekor/pkg/util/sha.go b/vendor/github.com/sigstore/rekor/pkg/util/sha.go index 8509a55858..07b8fb1b53 100644 --- a/vendor/github.com/sigstore/rekor/pkg/util/sha.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/sha.go @@ -15,6 +15,7 @@ package util import ( + "crypto" "fmt" "strings" ) @@ -33,9 +34,46 @@ func PrefixSHA(sha string) string { prefix = "sha1:" case 64: prefix = "sha256:" + case 96: + prefix = "sha384:" case 128: prefix = "sha512:" } return fmt.Sprintf("%v%v", prefix, sha) } + +func UnprefixSHA(sha string) (crypto.Hash, string) { + components := strings.Split(sha, ":") + + if len(components) == 2 { + prefix := components[0] + sha = components[1] + + switch prefix { + case "sha1": + return crypto.SHA1, sha + case "sha256": + return crypto.SHA256, sha + case "sha384": + return crypto.SHA384, sha + case "sha512": + return crypto.SHA512, sha + default: + return crypto.Hash(0), "" + } + } + + switch len(sha) { + case 40: + return crypto.SHA1, sha + case 64: + return crypto.SHA256, sha + case 96: + return crypto.SHA384, sha + case 128: + return crypto.SHA512, sha + } + + return crypto.Hash(0), "" +} diff --git 
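Note on the sha.go additions above: the new 96-character case corresponds to SHA-384 hex digests, and UnprefixSHA accepts either a prefixed digest or a bare one whose algorithm is inferred from its length. A usage sketch, assuming the updated github.com/sigstore/rekor/pkg/util package is importable:

package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"

	"github.com/sigstore/rekor/pkg/util"
)

func main() {
	// A SHA-384 hex digest is 96 characters, the new case in PrefixSHA/UnprefixSHA.
	sum := sha512.Sum384([]byte("payload"))
	digest := hex.EncodeToString(sum[:])
	fmt.Println(len(digest)) // 96

	// Prefixed input: the prefix decides the algorithm.
	alg, d := util.UnprefixSHA("sha384:" + digest)
	fmt.Println(alg, d) // SHA-384 <digest>

	// Bare input: the hex length decides (96 -> crypto.SHA384).
	alg, d = util.UnprefixSHA(digest)
	fmt.Println(alg, d)
}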
a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 303e5505e4..cabf645a5b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -31,10 +31,16 @@ const ( // Server HTTP metrics. const ( - RequestCount = "http.server.request_count" // Incoming request count total - RequestContentLength = "http.server.request_content_length" // Incoming request bytes total - ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total - ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds ) // Filter is a predicate used to determine whether a given http.request should @@ -42,5 +48,5 @@ const ( type Filter func(*http.Request) bool func newTracer(tp trace.TracerProvider) trace.Tracer { - return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index e4fa1b8d9d..a1b5b5e5aa 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -25,9 +25,8 @@ import ( "go.opentelemetry.io/otel/trace" ) -const ( - instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" -) +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // config represents the configuration options available for the http.Handler // and http.Transport types. 
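Note on the renamed server metric names above: they follow the OpenTelemetry HTTP semantic conventions, with byte sizes in unit "By" and durations in "ms". A minimal sketch of creating such an instrument through the public metric API (the global meter is a no-op until an SDK is installed):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")
	reqSize, err := meter.Int64Counter(
		"http.server.request.size",
		metric.WithUnit("By"),
		metric.WithDescription("Measures the size of HTTP request messages."),
	)
	if err != nil {
		panic(err)
	}
	reqSize.Add(context.Background(), 512)
}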
@@ -76,7 +75,7 @@ func newConfig(opts ...Option) *config { } c.Meter = c.MeterProvider.Meter( - instrumentationName, + ScopeName, metric.WithInstrumentationVersion(Version()), ) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index b2fbe07841..1fc15019e6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -43,10 +43,12 @@ type middleware struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - counters map[string]metric.Int64Counter - valueRecorders map[string]metric.Float64Histogram publicEndpoint bool publicEndpointFn func(*http.Request) bool + + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -104,21 +106,27 @@ func handleErr(err error) { } func (h *middleware) createMeasures() { - h.counters = make(map[string]metric.Int64Counter) - h.valueRecorders = make(map[string]metric.Float64Histogram) - - requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) + var err error + h.requestBytesCounter, err = h.meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) handleErr(err) - responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) + h.responseBytesCounter, err = h.meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) handleErr(err) - serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) + h.serverLatencyMeasure, err = h.meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) handleErr(err) - - h.counters[RequestContentLength] = requestBytesCounter - h.counters[ResponseContentLength] = responseBytesCounter - h.valueRecorders[ServerLatency] = serverLatencyMeasure } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -216,7 +224,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err) // Add metrics attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) @@ -224,13 +232,13 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } o := metric.WithAttributes(attributes...) 
- h.counters[RequestContentLength].Add(ctx, bw.read, o) - h.counters[ResponseContentLength].Add(ctx, rww.written, o) + h.requestBytesCounter.Add(ctx, bw.read.Load(), o) + h.responseBytesCounter.Add(ctx, rww.written, o) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) + h.serverLatencyMeasure.Record(ctx, elapsedTime, o) } func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index d3dede9ebb..0efd5261f6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // HTTPClientResponse returns trace attributes for an HTTP response received by a @@ -43,14 +43,22 @@ func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { } // HTTPClientRequest returns trace attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". +// The following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "user_agent.original", +// "http.request_content_length". func HTTPClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } +// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.method", "net.peer.name". +// The following attributes are returned if the +// related values are defined in req: "net.peer.port". +func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue { + return hc.ClientRequestMetrics(req) +} + // HTTPClientStatus returns a span status code and message for an HTTP status code // value received by a client. func HTTPClientStatus(code int) (codes.Code, string) { @@ -75,10 +83,9 @@ func HTTPClientStatus(code int) (codes.Code, string) { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". +// "http.target", "net.host.name". The following attributes are returned if +// they related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip". 
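Note on the latency computation above: dividing the raw time.Duration by float64(time.Millisecond) keeps sub-millisecond precision that the integer Milliseconds method would truncate. For example:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(1500 * time.Microsecond)
	d := time.Since(start)
	fmt.Println(d.Milliseconds())                       // 1: integer truncation
	fmt.Println(float64(d) / float64(time.Millisecond)) // ~1.5: precision kept
}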
func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } @@ -101,8 +108,8 @@ func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port". +// "net.host.name". The following attributes are returned if they related +// values are defined in req: "net.host.port". func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequestMetrics(server, req) } @@ -114,44 +121,12 @@ func HTTPServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } -// HTTPRequestHeader returns the contents of h as attributes. -// -// Instrumentation should require an explicit configuration of which headers to -// captured and then prune what they pass here. Including all headers can be a -// security risk - explicit configuration helps avoid leaking sensitive -// information. -// -// The User-Agent header is already captured in the http.user_agent attribute -// from ClientRequest and ServerRequest. Instrumentation may provide an option -// to capture that header here even though it is not recommended. Otherwise, -// instrumentation should filter that out of what is passed. -func HTTPRequestHeader(h http.Header) []attribute.KeyValue { - return hc.RequestHeader(h) -} - -// HTTPResponseHeader returns the contents of h as attributes. -// -// Instrumentation should require an explicit configuration of which headers to -// captured and then prune what they pass here. Including all headers can be a -// security risk - explicit configuration helps avoid leaking sensitive -// information. -// -// The User-Agent header is already captured in the http.user_agent attribute -// from ClientRequest and ServerRequest. Instrumentation may provide an option -// to capture that header here even though it is not recommended. Otherwise, -// instrumentation should filter that out of what is passed. -func HTTPResponseHeader(h http.Header) []attribute.KeyValue { - return hc.ResponseHeader(h) -} - // httpConv are the HTTP semantic convention attributes defined for a version // of the OpenTelemetry specification. 
type httpConv struct { NetConv *netConv - EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPResponseContentLengthKey attribute.Key @@ -161,15 +136,13 @@ type httpConv struct { HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key + UserAgentOriginalKey attribute.Key } var hc = &httpConv{ NetConv: nc, - EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, @@ -179,7 +152,7 @@ var hc = &httpConv{ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, - HTTPUserAgentKey: semconv.HTTPUserAgentKey, + UserAgentOriginalKey: semconv.UserAgentOriginalKey, } // ClientResponse returns attributes for an HTTP response received by a client @@ -193,6 +166,10 @@ var hc = &httpConv{ // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.status_code int + http.response_content_length int + */ var n int if resp.StatusCode > 0 { n++ @@ -212,11 +189,31 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { } // ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". +// following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "user_agent.original", +// "http.request_content_length". func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + user_agent.original string + http.url string + net.peer.name string + net.peer.port int + http.request_content_length int + */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. See ClientResponse. + http.response_content_length This requires the response. See ClientResponse. + net.sock.family This requires the socket used. + net.sock.peer.addr This requires the socket used. + net.sock.peer.name This requires the socket used. + net.sock.peer.port This requires the socket used. + http.resend_count This is something outside of a single request. + net.protocol.name The value in the Request is ignored, and the go client will always use "http". + net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. + */ n := 3 // URL, peer name, and method.
var h string if req.URL != nil { @@ -234,14 +231,10 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { if req.ContentLength > 0 { n++ } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.flavor(req.Proto)) var u string if req.URL != nil { @@ -260,15 +253,43 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { } if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if l := req.ContentLength; l > 0 { attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) } - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) + return attrs +} + +// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.method", "net.peer.name". +// The following attributes are returned if the related values +// are defined in req: "net.peer.port". +func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer)) + + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) } return attrs @@ -291,18 +312,35 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". +// "http.target", "net.host.name". The following attributes are returned if they +// related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip", +// "net.protocol.name", "net.protocol.version". func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. - + /* The following semantic conventions are returned if present: + http.method string + http.scheme string + net.host.name string + net.host.port int + net.sock.peer.addr string + net.sock.peer.port int + user_agent.original string + http.client_ip string + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + http.target string Note: doesn't include the query parameter. 
+ */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. + http.request_content_length This requires the len() of body, which can mutate it. + http.response_content_length This requires the response. + http.route This is not available. + net.sock.peer.name This would require a DNS lookup. + net.sock.host.addr The request doesn't have access to the underlying socket. + net.sock.host.port The request doesn't have access to the underlying socket. + + */ n := 4 // Method, scheme, proto, and host name. var host string var p int @@ -330,19 +368,31 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K if useragent != "" { n++ } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) if clientIP != "" { n++ } + + var target string + if req.URL != nil { + target = req.URL.Path + if target != "" { + n++ + } + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + n++ + } + if protoVersion != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { @@ -359,17 +409,24 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K } if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) - } - - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if clientIP != "" { attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) } + if target != "" { + attrs = append(attrs, c.HTTPTargetKey.String(target)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + return attrs } @@ -391,17 +448,21 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port". +// "net.host.name". The following attributes are returned if they related +// values are defined in req: "net.host.port". func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. - - n := 4 // Method, scheme, proto, and host name. + /* The following semantic conventions are returned if present: + http.scheme string + http.route string + http.method string + http.status_code int + net.host.name string + net.host.port int + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + */ + + n := 3 // Method, scheme, and host name. 
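Note on the count-then-append pattern used by these converters: the slice is sized exactly before filling so each attribute set costs a single allocation. A condensed illustration with hypothetical values:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	port := 8443

	// First pass: count the attributes that will be emitted.
	n := 2 // method and peer name are always present
	if port > 0 {
		n++
	}

	// Second pass: append into an exactly-sized slice.
	attrs := make([]attribute.KeyValue, 0, n)
	attrs = append(attrs,
		attribute.String("http.method", "GET"),
		attribute.String("net.peer.name", "example.com"),
	)
	if port > 0 {
		attrs = append(attrs, attribute.Int("net.peer.port", port))
	}
	fmt.Println(attrs)
}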
var host string var p int if server == "" { @@ -417,16 +478,29 @@ func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attr if hostPort > 0 { n++ } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.methodMetric(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) } + if protoName != "" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } return attrs } @@ -455,21 +529,6 @@ func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive return c.HTTPSchemeHTTP } -func (c *httpConv) flavor(proto string) attribute.KeyValue { - switch proto { - case "HTTP/1.0": - return c.HTTPFlavorKey.String("1.0") - case "HTTP/1.1": - return c.HTTPFlavorKey.String("1.1") - case "HTTP/2": - return c.HTTPFlavorKey.String("2.0") - case "HTTP/3": - return c.HTTPFlavorKey.String("3.0") - default: - return c.HTTPFlavorKey.String(proto) - } -} - func serverClientIP(xForwardedFor string) string { if idx := strings.Index(xForwardedFor, ","); idx >= 0 { xForwardedFor = xForwardedFor[:idx] @@ -501,31 +560,6 @@ func firstHostPort(source ...string) (host string, port int) { return } -// RequestHeader returns the contents of h as OpenTelemetry attributes. -func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { - return c.header("http.request.header", h) -} - -// ResponseHeader returns the contents of h as OpenTelemetry attributes. -func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { - return c.header("http.response.header", h) -} - -func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { - key := func(k string) attribute.Key { - k = strings.ToLower(k) - k = strings.ReplaceAll(k, "-", "_") - k = fmt.Sprintf("%s.%s", prefix, k) - return attribute.Key(k) - } - - attrs := make([]attribute.KeyValue, 0, len(h)) - for k, v := range h { - attrs = append(attrs, key(k).StringSlice(v)) - } - return attrs -} - // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func (c *httpConv) ClientStatus(code int) (codes.Code, string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index bde8893437..d3a06e0cad 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -22,7 +22,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // NetTransport returns a trace attribute describing the transport protocol of the @@ -32,24 +32,6 @@ func NetTransport(network string) attribute.KeyValue { return nc.Transport(network) } -// NetClient returns trace attributes for a client network connection to address. -// See net.Dial for information about acceptable address values, address should -// be the same as the one used to create conn. 
If conn is nil, only network -// peer attributes will be returned that describe address. Otherwise, the -// socket level information about conn will also be included. -func NetClient(address string, conn net.Conn) []attribute.KeyValue { - return nc.Client(address, conn) -} - -// NetServer returns trace attributes for a network listener listening at address. -// See net.Listen for information about acceptable address values, address -// should be the same as the one used to create ln. If ln is nil, only network -// host attributes will be returned that describe address. Otherwise, the -// socket level information about ln will also be included. -func NetServer(address string, ln net.Listener) []attribute.KeyValue { - return nc.Server(address, ln) -} - // netConv are the network semantic convention attributes defined for a version // of the OpenTelemetry specification. type netConv struct { @@ -57,6 +39,8 @@ type netConv struct { NetHostPortKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key + NetProtocolName attribute.Key + NetProtocolVersion attribute.Key NetSockFamilyKey attribute.Key NetSockPeerAddrKey attribute.Key NetSockPeerPortKey attribute.Key @@ -73,6 +57,8 @@ var nc = &netConv{ NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, + NetProtocolName: semconv.NetProtocolNameKey, + NetProtocolVersion: semconv.NetProtocolVersionKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, @@ -121,52 +107,6 @@ func (c *netConv) Host(address string) []attribute.KeyValue { return attrs } -// Server returns attributes for a network listener listening at address. See -// net.Listen for information about acceptable address values, address should -// be the same as the one used to create ln. If ln is nil, only network host -// attributes will be returned that describe address. Otherwise, the socket -// level information about ln will also be included. -func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { - if ln == nil { - return c.Host(address) - } - - lAddr := ln.Addr() - if lAddr == nil { - return c.Host(address) - } - - hostName, hostPort := splitHostPort(address) - sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) - network := lAddr.Network() - sockFamily := family(network, sockHostAddr) - - n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) - n += positiveInt(hostPort, sockHostPort) - attr := make([]attribute.KeyValue, 0, n) - if hostName != "" { - attr = append(attr, c.HostName(hostName)) - if hostPort > 0 { - // Only if net.host.name is set should net.host.port be. - attr = append(attr, c.HostPort(hostPort)) - } - } - if network != "" { - attr = append(attr, c.Transport(network)) - } - if sockFamily != "" { - attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) - } - if sockHostAddr != "" { - attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) - if sockHostPort > 0 { - // Only if net.sock.host.addr is set should net.sock.host.port be. - attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) - } - } - return attr -} - func (c *netConv) HostName(name string) attribute.KeyValue { return c.NetHostNameKey.String(name) } @@ -175,85 +115,6 @@ func (c *netConv) HostPort(port int) attribute.KeyValue { return c.NetHostPortKey.Int(port) } -// Client returns attributes for a client network connection to address. 
See -// net.Dial for information about acceptable address values, address should be -// the same as the one used to create conn. If conn is nil, only network peer -// attributes will be returned that describe address. Otherwise, the socket -// level information about conn will also be included. -func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { - if conn == nil { - return c.Peer(address) - } - - lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() - - var network string - switch { - case lAddr != nil: - network = lAddr.Network() - case rAddr != nil: - network = rAddr.Network() - default: - return c.Peer(address) - } - - peerName, peerPort := splitHostPort(address) - var ( - sockFamily string - sockPeerAddr string - sockPeerPort int - sockHostAddr string - sockHostPort int - ) - - if lAddr != nil { - sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) - } - - if rAddr != nil { - sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) - } - - switch { - case sockHostAddr != "": - sockFamily = family(network, sockHostAddr) - case sockPeerAddr != "": - sockFamily = family(network, sockPeerAddr) - } - - n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) - n += positiveInt(peerPort, sockPeerPort, sockHostPort) - attr := make([]attribute.KeyValue, 0, n) - if peerName != "" { - attr = append(attr, c.PeerName(peerName)) - if peerPort > 0 { - // Only if net.peer.name is set should net.peer.port be. - attr = append(attr, c.PeerPort(peerPort)) - } - } - if network != "" { - attr = append(attr, c.Transport(network)) - } - if sockFamily != "" { - attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) - } - if sockPeerAddr != "" { - attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) - if sockPeerPort > 0 { - // Only if net.sock.peer.addr is set should net.sock.peer.port be. - attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) - } - } - if sockHostAddr != "" { - attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) - if sockHostPort > 0 { - // Only if net.sock.host.addr is set should net.sock.host.port be. - attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) - } - } - return attr -} - func family(network, address string) string { switch network { case "unix", "unixgram", "unixpacket": @@ -269,26 +130,6 @@ func family(network, address string) string { return "" } -func nonZeroStr(strs ...string) int { - var n int - for _, str := range strs { - if str != "" { - n++ - } - } - return n -} - -func positiveInt(ints ...int) int { - var n int - for _, i := range ints { - if i > 0 { - n++ - } - } - return n -} - // Peer returns attributes for a network peer address. 
func (c *netConv) Peer(address string) []attribute.KeyValue { h, p := splitHostPort(address) @@ -366,3 +207,9 @@ func splitHostPort(hostport string) (host string, port int) { } return host, int(p) } + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index e835cac12e..43e937a67a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -19,31 +19,43 @@ import ( "io" "net/http" "net/http/httptrace" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // Transport implements the http.RoundTripper interface and wraps -// outbound HTTP(S) requests with a span. +// outbound HTTP(S) requests with a span and enriches it with metrics. type Transport struct { rt http.RoundTripper tracer trace.Tracer + meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter spanNameFormatter func(string, *http.Request) string clientTrace func(context.Context) *httptrace.ClientTrace + + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram } var _ http.RoundTripper = &Transport{} // NewTransport wraps the provided http.RoundTripper with one that -// starts a span and injects the span context into the outbound request headers. +// starts a span, injects the span context into the outbound request headers, +// and enriches it with metrics. // // If the provided http.RoundTripper is nil, http.DefaultTransport will be used // as the base http.RoundTripper. @@ -63,12 +75,14 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) + t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer + t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters @@ -76,6 +90,30 @@ func (t *Transport) applyConfig(c *config) { t.clientTrace = c.ClientTrace } +func (t *Transport) createMeasures() { + var err error + t.requestBytesCounter, err = t.meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + t.responseBytesCounter, err = t.meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + t.latencyMeasure, err = t.meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) +} + func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -84,6 +122,7 @@ func defaultTransportFormatter(_ string, r *http.Request) string { // before handing the request to the configured base RoundTripper. 
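Note on the new netProtocol helper above: it is a single strings.Cut over http.Request.Proto. Its behavior in isolation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, proto := range []string{"HTTP/1.1", "HTTP/2.0", "spdy/3"} {
		name, version, _ := strings.Cut(proto, "/")
		// netProtocol lower-cases the name; callers drop plain "http" names.
		fmt.Println(strings.ToLower(name), version)
	}
}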
The created span will // end when the response body is closed or when a read from the body returns io.EOF. func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + requestStartTime := time.Now() for _, f := range t.filters { if !f(r) { // Simply pass through to the base RoundTripper if a filter rejects the request @@ -109,7 +148,23 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. + + // use a body wrapper to determine the request size + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + if r.Body != nil && r.Body != http.NoBody { + bw.ReadCloser = r.Body + // noop to prevent nil panic. not using this record fun yet. + bw.record = func(int64) {} + r.Body = &bw + } + span.SetAttributes(semconvutil.HTTPClientRequest(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) @@ -121,9 +176,28 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } + // metrics + metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + if res.StatusCode > 0 { + metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) + } + o := metric.WithAttributes(metricAttrs...) + t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + // For handling response bytes we leverage a callback when the client reads the http response + readRecordFunc := func(n int64) { + t.responseBytesCounter.Add(ctx, n, o) + } + + // traces span.SetAttributes(semconvutil.HTTPClientResponse(res)...) span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) - res.Body = newWrappedBody(span, res.Body) + + res.Body = newWrappedBody(span, readRecordFunc, res.Body) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + t.latencyMeasure.Record(ctx, elapsedTime, o) return res, err } @@ -131,17 +205,17 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. -func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { +func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser { // The successful protocol switch responses will have a body that // implement an io.ReadWriteCloser. Ensure this interface type continues // to be satisfied if that is the case. if _, ok := body.(io.ReadWriteCloser); ok { - return &wrappedBody{span: span, body: body} + return &wrappedBody{span: span, record: record, body: body} } // Remove the implementation of the io.ReadWriteCloser and only implement // the io.ReadCloser. 
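Note on r.Clone(ctx) above: http.RoundTripper implementations must not modify the caller's request, so the transport injects headers into a clone. A small demonstration (the traceparent value is made up):

package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	r, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	r2 := r.Clone(context.Background())
	r2.Header.Set("traceparent", "00-0123456789abcdef0123456789abcdef-0123456789abcdef-01")
	fmt.Println(r.Header.Get("traceparent") == "") // true: the original is untouched
}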
- return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}} + return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}} } // wrappedBody is the response body type returned by the transport @@ -153,8 +227,11 @@ func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { // If the response body implements the io.Writer interface (i.e. for // successful protocol switches), the wrapped body also will. type wrappedBody struct { - span trace.Span - body io.ReadCloser + span trace.Span + recorded atomic.Bool + record func(n int64) + body io.ReadCloser + read atomic.Int64 } var _ io.ReadWriteCloser = &wrappedBody{} @@ -171,11 +248,14 @@ func (wb *wrappedBody) Write(p []byte) (int, error) { func (wb *wrappedBody) Read(b []byte) (int, error) { n, err := wb.body.Read(b) + // Record the number of bytes read + wb.read.Add(int64(n)) switch err { case nil: // nothing to do here but fall through to the return case io.EOF: + wb.recordBytesRead() wb.span.End() default: wb.span.RecordError(err) @@ -184,7 +264,20 @@ func (wb *wrappedBody) Read(b []byte) (int, error) { return n, err } +// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once. +func (wb *wrappedBody) recordBytesRead() { + // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that + // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using + // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish + // calling wb.record(wb.read.Load()). + if wb.recorded.CompareAndSwap(false, true) { + // Record the total number of bytes read + wb.record(wb.read.Load()) + } +} + func (wb *wrappedBody) Close() error { + wb.recordBytesRead() wb.span.End() if wb.body != nil { return wb.body.Close() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 6eace875cf..35254e888f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
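Note on recordBytesRead above: atomic.Bool.CompareAndSwap guarantees the byte count is recorded exactly once even when Close races a final io.EOF read. The pattern in isolation:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var recorded atomic.Bool
	var calls atomic.Int64

	record := func() {
		// Only the first CompareAndSwap flips false->true; racing callers
		// (e.g. Close racing a final EOF read) return without double-counting.
		if recorded.CompareAndSwap(false, true) {
			calls.Add(1)
		}
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			record()
		}()
	}
	wg.Wait()
	fmt.Println(calls.Load()) // always 1
}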
func Version() string { - return "0.45.0" + return "0.49.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index 11a35ed167..2852ec9717 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -18,6 +18,7 @@ import ( "context" "io" "net/http" + "sync/atomic" "go.opentelemetry.io/otel/propagation" ) @@ -30,14 +31,14 @@ type bodyWrapper struct { io.ReadCloser record func(n int64) // must not be nil - read int64 + read atomic.Int64 err error } func (w *bodyWrapper) Read(b []byte) (int, error) { n, err := w.ReadCloser.Read(b) n1 := int64(n) - w.read += n1 + w.read.Add(n1) w.err = err w.record(n1) return n, err diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index ae6a3bcf12..120b63a9c7 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -3,3 +3,5 @@ fo te collison consequentially +ans +nam diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index fe670d79cc..98f2d20438 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,80 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 + +This release is the last to support [Go 1.20]. +The next release will require at least [Go 1.21]. + +### Added + +- Support [Go 1.22]. (#4890) +- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900) +- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900) +- The `go.opentelemetry.io/otel/log` module is added. + This module includes OpenTelemetry Go's implementation of the Logs Bridge API. + This module is in an alpha state, it is subject to breaking changes. + See our [versioning policy](./VERSIONING.md) for more info. (#4961) + +### Fixed + +- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945) +- Fix negative buckets in output of exponential histograms. (#4956) + +## [1.23.1] 2024-02-07 + +### Fixed + +- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888) + +## [1.23.0] 2024-02-06 + +This release contains the first stable, `v1`, release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. 
(#4808) +- Experimental exemplar exporting is added to the metric SDK. + See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871) +- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`. + This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876) + +### Changed + +- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now returns a partial result if there is a schema URL merge conflict. + Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned. + It is up to the user to decide if they want to use the returned `Resource` or not. + It may have desired attributes overwritten or include stale semantic conventions. (#4876) + +### Fixed + +- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449) +- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820) +- Fix missing `Mix` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/sdk/metric/metricdata`. (#4827) + +## [1.23.0-rc.1] 2024-01-18 + +This is a release candidate for the v1.23.0 release. +That release is expected to include the `v1` release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + ## [1.22.0/0.45.0] 2024-01-17 ### Added @@ -22,13 +96,13 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770) - Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733) - Experimental cardinality limiting is added to the metric SDK. - See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) + See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) - Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804) ### Changed - Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754) -- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.19.0` version of the OpenTelemetry specification. (#4754) +- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754) - Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`. If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). 
(#4671) - Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722) @@ -2775,7 +2849,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.22.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD +[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 +[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 +[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 +[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1 [1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 [1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 [1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 @@ -2850,6 +2928,8 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 +[Go 1.22]: https://go.dev/doc/go1.22 +[Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 623740007d..31d336d922 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -14,4 +14,4 @@ * @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 31857a6173..c9f2bac55b 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -617,13 +617,13 @@ should be canceled. - [Evan Torrie](https://github.com/evantorrie), Verizon Media - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [David Ashpole](https://github.com/dashpole), Google - [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Damien Mathieu](https://github.com/dmathieu), Elastic - [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers +- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Robert Pająk](https://github.com/pellared), Splunk - [Tyler Yahn](https://github.com/MrAlias), Splunk diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 35fc189961..6de95219be 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -192,7 +192,7 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. +# Adding a directory will include all benchmarks in that directory if a filter is not specified. 
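Note on the 1.23.0 Merge change recorded in the CHANGELOG above: a schema URL conflict now yields both a usable Resource and ErrSchemaURLConflict, leaving the decision to the caller. A sketch, assuming go.opentelemetry.io/otel/sdk v1.23.0 or later:

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	a := resource.NewWithAttributes(
		"https://opentelemetry.io/schemas/1.21.0",
		attribute.String("service.name", "a"),
	)
	b := resource.NewWithAttributes(
		"https://opentelemetry.io/schemas/1.24.0",
		attribute.String("service.version", "1.0"),
	)
	merged, err := resource.Merge(a, b)
	if errors.Is(err, resource.ErrSchemaURLConflict) {
		// The merge still produced a Resource; the caller decides whether the
		// conflicting schema URLs make it unsafe to use.
		fmt.Println("conflict, partial result:", merged)
	}
}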
BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) @@ -315,4 +315,4 @@ add-tags: | $(MULTIMOD) .PHONY: lint-markdown lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 44e1bfc9b5..7766259a5c 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -11,14 +11,11 @@ It provides a set of APIs to directly measure performance and behavior of your s ## Project Status -| Signal | Status | -|---------|------------| -| Traces | Stable | -| Metrics | Stable | -| Logs | Design [1] | - -- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). - No Logs Pull Requests are currently being accepted. +| Signal | Status | +|---------|--------------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | In development[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -28,6 +25,8 @@ and Project versioning information and stability guarantees can be found in the [versioning documentation](VERSIONING.md). +[^1]: https://github.com/orgs/open-telemetry/projects/43 + ### Compatibility OpenTelemetry-Go ensures compatibility with the current supported versions of @@ -50,14 +49,19 @@ Currently, this project supports the following environments. | OS | Go Version | Architecture | |---------|------------|--------------| +| Ubuntu | 1.22 | amd64 | | Ubuntu | 1.21 | amd64 | | Ubuntu | 1.20 | amd64 | +| Ubuntu | 1.22 | 386 | | Ubuntu | 1.21 | 386 | | Ubuntu | 1.20 | 386 | +| MacOS | 1.22 | amd64 | | MacOS | 1.21 | amd64 | | MacOS | 1.20 | amd64 | +| Windows | 1.22 | amd64 | | Windows | 1.21 | amd64 | | Windows | 1.20 | amd64 | +| Windows | 1.22 | 386 | | Windows | 1.21 | 386 | | Windows | 1.20 | 386 | diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 7e6765b06b..fb6da51450 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -427,7 +427,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } -// MarshalLog is the marshaling function used by the logging system to represent this exporter. +// MarshalLog is the marshaling function used by the logging system to represent this Set. 
func (l Set) MarshalLog() interface{} { kvs := make(map[string]string) for _, kv := range l.ToSlice() { diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 0097db478c..7ed61c0e25 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -130,9 +130,11 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { inst.setDelegate(meter) } - for e := m.registry.Front(); e != nil; e = e.Next() { + var n *list.Element + for e := m.registry.Front(); e != nil; e = n { r := e.Value.(*registration) r.setDelegate(meter) + n = e.Next() m.registry.Remove(e) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 7985005bcb..386c8bfdc0 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -63,7 +63,7 @@ func SetTracerProvider(tp trace.TracerProvider) { // to itself. Error( errors.New("no delegate configured in tracer provider"), - "Setting tracer provider to it's current value. No delegate will be configured", + "Setting tracer provider to its current value. No delegate will be configured", ) return } @@ -92,7 +92,7 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) { // delegate to itself. Error( errors.New("no delegate configured in text map propagator"), - "Setting text map propagator to it's current value. No delegate will be configured", + "Setting text map propagator to its current value. No delegate will be configured", ) return } @@ -123,7 +123,7 @@ func SetMeterProvider(mp metric.MeterProvider) { // to itself. Error( errors.New("no delegate configured in meter provider"), - "Setting meter provider to it's current value. No delegate will be configured", + "Setting meter provider to its current value. No delegate will be configured", ) return } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go deleted file mode 100644 index 19c394c69b..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/semconv/internal" - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// SemanticConventions are the semantic convention values defined for a -// version of the OpenTelemetry specification. 
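A note on the internal/global/meter.go hunk above: container/list zeroes an element's links when it is removed, so the old loop's step e = e.Next() returned nil immediately after m.registry.Remove(e) and only the first registration was ever delegated. The fix saves the successor before removing. A minimal, runnable sketch of the corrected pattern (the list contents are illustrative):

package main

import (
	"container/list"
	"fmt"
)

func main() {
	regs := list.New()
	for _, name := range []string{"a", "b", "c"} { // illustrative payloads
		regs.PushBack(name)
	}

	// Capture the successor before Remove: Remove clears the element's
	// links, so calling e.Next() afterwards returns nil and the loop
	// would stop after the first element.
	var next *list.Element
	for e := regs.Front(); e != nil; e = next {
		next = e.Next()
		fmt.Println("delegating:", e.Value)
		regs.Remove(e)
	}
	fmt.Println("remaining:", regs.Len()) // remaining: 0
}
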
-type SemanticConventions struct { - EnduserIDKey attribute.Key - HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key - HTTPHostKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPServerNameKey attribute.Key - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key - NetHostIPKey attribute.Key - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerIPKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetTransportIP attribute.KeyValue - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportUnix attribute.KeyValue -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, sc.NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, sc.NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, sc.NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, sc.NetTransportUnix) - default: - attrs = append(attrs, sc.NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, sc.NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. -func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. 
-func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{sc.EnduserIDKey.String(username)} - } - return nil -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, sc.HTTPSchemeHTTPS) - } else { - attrs = append(attrs, sc.HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) - } else if request.URL != nil && request.URL.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) - } - - if request.Method != "" { - attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) - } - - return attrs -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. 
-func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, sc.HTTPRouteKey.String(route)) - } - if values := request.Header["X-Forwarded-For"]; len(values) > 0 { - addr := values[0] - if i := strings.Index(addr, ","); i > 0 { - addr = addr[:i] - } - attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) - } - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// validateHTTPStatusCode validates the HTTP status code and returns -// corresponding span status code. If the `code` is not a valid HTTP status -// code, returns span status Error and false. 
-func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go new file mode 100644 index 0000000000..12d6b520f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go @@ -0,0 +1,404 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// HTTPConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type HTTPConv struct { + NetConv *NetConv + + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status.code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". 
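ClientResponse above shows the allocation discipline used throughout this new file: count the attributes that will actually be set, then allocate the slice with exactly that capacity so the appends never reallocate. The same idea in standalone form (the attribute key names are illustrative stand-ins, not the vendored keys):

package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/attribute"
)

// responseAttrs mirrors the count-then-append pattern of ClientResponse:
// each condition is evaluated twice, once to size the slice and once to
// fill it, so the backing array is allocated exactly once.
func responseAttrs(resp *http.Response) []attribute.KeyValue {
	var n int
	if resp.StatusCode > 0 {
		n++
	}
	if resp.ContentLength > 0 {
		n++
	}

	attrs := make([]attribute.KeyValue, 0, n) // exact capacity, no growth
	if resp.StatusCode > 0 {
		attrs = append(attrs, attribute.Int("http.status_code", resp.StatusCode))
	}
	if resp.ContentLength > 0 {
		attrs = append(attrs, attribute.Int64("http.response_content_length", resp.ContentLength))
	}
	return attrs
}

func main() {
	resp := &http.Response{StatusCode: 200, ContentLength: 1024}
	fmt.Println(responseAttrs(resp))
}
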
+func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { + n := 3 // URL, peer name, proto, and method. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + if req.ContentLength > 0 { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.proto(req.Proto)) + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, c.HTTPURLKey.String(u)) + + attrs = append(attrs, c.NetConv.PeerName(peer)) + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if l := req.ContentLength; l > 0 { + attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + return attrs +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the HTTPConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
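ClientRequest above renders req.URL with any userinfo temporarily cleared, so credentials never reach the http.url attribute, and then restores it so the caller's request is untouched. That scrub/render/restore step in isolation (the URL is an illustrative example):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://user:secret@example.com/path?q=1") // illustrative URL
	if err != nil {
		panic(err)
	}

	// Scrub: drop any username/password before stringifying for telemetry.
	userinfo := u.User
	u.User = nil
	scrubbed := u.String()
	// Restore: leave the caller's URL exactly as it was.
	u.User = userinfo

	fmt.Println(scrubbed)   // https://example.com/path?q=1
	fmt.Println(u.String()) // https://user:secret@example.com/path?q=1
}
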
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + peer, peerPort := splitHostPort(req.RemoteAddr) + if peer != "" { + n++ + if peerPort > 0 { + n++ + } + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + return attrs +} + +func (c *HTTPConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *HTTPConv) proto(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. 
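serverClientIP above keeps only the text before the first comma of X-Forwarded-For: the left-most entry is the original client, and each proxy appends itself after it. A standalone version with an illustrative header value:

package main

import (
	"fmt"
	"strings"
)

// clientIP mirrors serverClientIP above: everything before the first
// comma is the original client; later entries are proxies that appended
// themselves. Whitespace is kept as-is, matching the vendored helper.
func clientIP(xForwardedFor string) string {
	if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
		xForwardedFor = xForwardedFor[:idx]
	}
	return xForwardedFor
}

func main() {
	// Illustrative header recorded after two proxy hops.
	fmt.Println(clientIP("203.0.113.7, 10.0.0.2, 10.0.0.3")) // 203.0.113.7
}
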
+func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return stat, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + + if code/100 == 4 { + return codes.Unset, "" + } + return stat, "" +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go new file mode 100644 index 0000000000..4a711133a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -0,0 +1,324 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
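ClientStatus and ServerStatus above differ in a single rule: a server span leaves 4xx responses Unset, because a client error says nothing about the server, while a client span reports them as Error. A compressed sketch of that asymmetry; unlike the vendored helpers it skips the per-category valid-range table, so treat it as an approximation:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// spanStatus approximates the mapping above: out-of-range codes and 5xx
// are Error, 4xx is Error only for client spans, everything else Unset.
// The vendored helpers additionally reject codes outside the known valid
// ranges per category; that check is omitted here for brevity.
func spanStatus(code int, server bool) codes.Code {
	switch {
	case code < 100 || code >= 600:
		return codes.Error
	case code >= 500:
		return codes.Error
	case code >= 400 && !server:
		return codes.Error
	default:
		return codes.Unset
	}
}

func main() {
	fmt.Println(spanStatus(404, true))  // Unset: server spans ignore 4xx
	fmt.Println(spanStatus(404, false)) // Error: client spans do not
	fmt.Println(spanStatus(500, true))  // Error
}
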
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" +) + +// NetConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type NetConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +func (c *NetConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *NetConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. + attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. 
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *NetConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *NetConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
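The family helper above derives the socket family from the network string plus the literal address: unix-domain networks map straight to "unix", otherwise net.ParseIP and To4 distinguish inet from inet6, and anything unparseable carries no family. The same logic exercised on its own (sample inputs are illustrative):

package main

import (
	"fmt"
	"net"
)

// sockFamily reproduces the decision above: unix networks are "unix",
// parseable IPv4 is "inet", parseable IPv6 is "inet6", anything else "".
func sockFamily(network, address string) string {
	switch network {
	case "unix", "unixgram", "unixpacket":
		return "unix"
	}
	ip := net.ParseIP(address)
	if ip == nil {
		return "" // hostnames and garbage carry no family
	}
	if ip.To4() == nil {
		return "inet6"
	}
	return "inet"
}

func main() {
	fmt.Println(sockFamily("tcp", "127.0.0.1"))      // inet
	fmt.Println(sockFamily("tcp", "2001:db8::1"))    // inet6
	fmt.Println(sockFamily("unix", "/tmp/app.sock")) // unix
	fmt.Println(sockFamily("tcp", "example.com"))    // (empty)
}
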
+func (c *NetConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(int(p))) + } + return attrs +} + +func (c *NetConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *NetConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go deleted file mode 100644 index 4b4f3cbaf0..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/semconv/internal" - "go.opentelemetry.io/otel/trace" -) - -// HTTP scheme attributes. 
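splitHostPort above is deliberately more forgiving than net.SplitHostPort: a bare host, a bracketed IPv6 literal without a port, and a bare ":port" all parse, with -1 signalling that no port was given. A quick tour of those edge cases, with the function copied verbatim from the hunk above so the sketch runs as-is:

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPort is copied from the net.go hunk above; it is unexported
// in the vendored package, so a standalone program needs its own copy.
func splitHostPort(hostport string) (host string, port int) {
	port = -1

	if strings.HasPrefix(hostport, "[") {
		addrEnd := strings.LastIndex(hostport, "]")
		if addrEnd < 0 {
			// Invalid hostport.
			return
		}
		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
			host = hostport[1:addrEnd]
			return
		}
	} else {
		if i := strings.LastIndex(hostport, ":"); i < 0 {
			host = hostport
			return
		}
	}

	host, pStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return
	}

	p, err := strconv.ParseUint(pStr, 10, 16)
	if err != nil {
		return
	}
	return host, int(p)
}

func main() {
	for _, in := range []string{
		"example.com",       // bare host      -> ("example.com", -1)
		"example.com:8080",  // host and port  -> ("example.com", 8080)
		"[2001:db8::1]",     // IPv6, no port  -> ("2001:db8::1", -1)
		"[2001:db8::1]:443", // IPv6 with port -> ("2001:db8::1", 443)
		":8080",             // port only      -> ("", 8080)
	} {
		h, p := splitHostPort(in)
		fmt.Printf("%-19q -> (%q, %d)\n", in, h, p)
	}
}
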
-var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) - -var sc = &internal.SemanticConventions{ - EnduserIDKey: EnduserIDKey, - HTTPClientIPKey: HTTPClientIPKey, - HTTPFlavorKey: HTTPFlavorKey, - HTTPHostKey: HTTPHostKey, - HTTPMethodKey: HTTPMethodKey, - HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, - HTTPRouteKey: HTTPRouteKey, - HTTPSchemeHTTP: HTTPSchemeHTTP, - HTTPSchemeHTTPS: HTTPSchemeHTTPS, - HTTPServerNameKey: HTTPServerNameKey, - HTTPStatusCodeKey: HTTPStatusCodeKey, - HTTPTargetKey: HTTPTargetKey, - HTTPURLKey: HTTPURLKey, - HTTPUserAgentKey: HTTPUserAgentKey, - NetHostIPKey: NetHostIPKey, - NetHostNameKey: NetHostNameKey, - NetHostPortKey: NetHostPortKey, - NetPeerIPKey: NetPeerIPKey, - NetPeerNameKey: NetPeerNameKey, - NetPeerPortKey: NetPeerPortKey, - NetTransportIP: NetTransportIP, - NetTransportOther: NetTransportOther, - NetTransportTCP: NetTransportTCP, - NetTransportUDP: NetTransportUDP, - NetTransportUnix: NetTransportUnix, -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - return sc.NetAttributesFromHTTPRequest(network, request) -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. -func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.EndUserAttributesFromHTTPRequest(request) -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.HTTPClientAttributesFromHTTPRequest(request) -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. -func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - return sc.HTTPAttributesFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. 
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go deleted file mode 100644 index b2155676f4..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go +++ /dev/null @@ -1,1042 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). -const ( - // Array of brand name and version separated by a space - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (navigator.userAgentData.brands). - BrowserBrandsKey = attribute.Key("browser.brands") - // The platform on which the browser is running - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (navigator.userAgentData.platform). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in the - // [os.type and os.name attributes](./os.md). However, for consistency, the values - // in the `browser.platform` attribute should capture the exact value that the - // user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") - // Full user-agent string provided by the browser - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 - // (KHTML, ' - // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' - // Note: The user-agent value SHOULD be provided only from browsers that do not - // have a mechanism to retrieve brands and platform individually from the User- - // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` - // API can be used. 
- BrowserUserAgentKey = attribute.Key("browser.user_agent") -) - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // Name of the cloud provider. - // - // Type: Enum - // Required: No - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- - // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- - // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- - // us/global-infrastructure/geographies/), [Google Cloud - // regions](https://cloud.google.com/about/locations), or [Tencent Cloud - // regions](https://intl.cloud.tencent.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. - // - // Type: Enum - // Required: No - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // The ARN of an EKS cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// Resources specific to Amazon Web Services. -const ( - // The name(s) of the AWS log group(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. 
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// A container instance. -const ( - // Container name used by container runtime. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// The software deployment. -const ( - // Name of the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// The device on which the process represented by this resource is running. -const ( - // A unique identifier representing the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") - // The name of the device manufacturer - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// A serverless instance. -const ( - // The name of the single function that this runtime instance executes. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- - // general.md#source-code-attributes) - // span attributes). - - // For some cloud providers, the above definition is ambiguous. The following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `faas.id` attribute). - FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the single function that this runtime instance executes. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: On some cloud providers, it may not be possible to determine the full ID - // at startup, - // so consider setting `faas.id` as a span attribute instead. - - // The exact value to use for `faas.id` depends on the cloud provider: - - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // aliases.html) - // with the resolved function version, as the same runtime instance may be - // invokable with - // multiple different aliases. - // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- - // resource-names) - // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- - // us/rest/api/resources/resources/get-by-id) of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.We - // b/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function app can - // host multiple functions that would usually share - // a TracerProvider. - FaaSIDKey = attribute.Key("faas.id") - // The immutable version of the function being executed. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env- - // var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string, that will be potentially reused for - // other invocations to the same function/function version. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// A host is defined as a general computing instance. -const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. - // - // Type: Enum - // Required: No - // Stability: stable - HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// A Kubernetes Cluster. -const ( - // The name of the cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// A Kubernetes Node object. -const ( - // The name of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// A Kubernetes Namespace. -const ( - // The name of the namespace that the pod is running in. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// A Kubernetes Pod object. -const ( - // The UID of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // The name of the Container from Pod specification, must be unique within a Pod. - // Container runtime usually uses different globally unique name - // (`container.name`). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - // Number of times the container was restarted. This attribute can be used to - // identify a particular container (running or stopped) within a container spec. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// A Kubernetes ReplicaSet object. -const ( - // The UID of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// A Kubernetes Deployment object. -const ( - // The UID of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. 
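To show how the Kubernetes resource keys above combine in practice, a small sketch; the helper name is hypothetical and only this semconv package and the attribute package are assumed:

    package example

    import (
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
    )

    // k8sPodAttrs (hypothetical) collects the cluster, namespace, and pod
    // identity attributes defined above for one running pod.
    func k8sPodAttrs(cluster, namespace, pod, podUID string) []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.K8SClusterNameKey.String(cluster),
            semconv.K8SNamespaceNameKey.String(namespace),
            semconv.K8SPodNameKey.String(pod),
            semconv.K8SPodUIDKey.String(podUID),
        }
    }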
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// A Kubernetes StatefulSet object. -const ( - // The UID of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// A Kubernetes DaemonSet object. -const ( - // The UID of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// A Kubernetes Job object. -const ( - // The UID of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// A Kubernetes CronJob object. -const ( - // The UID of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// The operating system (OS) on which the process represented by this resource is running. -const ( - // The operating system type. - // - // Type: Enum - // Required: Always - // Stability: stable - OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' - OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// An operating system process.
-const (
- // Process identifier (PID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
- // The name of the process executable. On Linux-based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
- // The full path to the process executable. On Linux-based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
- // The command used to launch the process (i.e. the command name). On Linux-based
- // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
- // can be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
- // The full command used to launch the process as a single string. On Windows,
- // can be set to the result of `GetCommandLineW`. Do not set this if you have
- // to assemble it just for monitoring; use `process.command_args` instead.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
- // All the command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited strings
- // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
- // the full argv vector passed to `main`.
- //
- // Type: string[]
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otelcol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
- // The username of the user that owns the process.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// The single (language) runtime instance which is monitored.
-const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// A service instance. -const ( - // Logical name of the service. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). Zero- - // length namespace string is assumed equal to unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. 
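The service.* rules above (mandatory logical name, optional namespace, per-instance UUID) are easiest to see in a sketch. This one additionally assumes github.com/google/uuid for the random RFC 4122 v4 instance ID; the helper name and values are hypothetical:

    package example

    import (
        "github.com/google/uuid"

        sdkresource "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
    )

    // serviceResource (hypothetical) identifies one instance of a
    // horizontally scaled service.
    func serviceResource() *sdkresource.Resource {
        return sdkresource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceNameKey.String("shoppingcart"),
            semconv.ServiceNamespaceKey.String("Shop"),
            semconv.ServiceInstanceIDKey.String(uuid.NewString()),
        )
    }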
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// The telemetry SDK used to capture data recorded by the instrumentation libraries. -const ( - // The name of the telemetry SDK as defined above. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. - // - // Type: Enum - // Required: No - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. -const ( - // The name of the web engine. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go deleted file mode 100644 index 047d8e95cc..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go +++ /dev/null @@ -1,1704 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -import "go.opentelemetry.io/otel/attribute" - -// Span attributes used by AWS Lambda (in addition to general `faas` attributes). -const ( - // The full invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` - // applicable). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `faas.id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. -const ( - // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec - // .md#id) uniquely identifies the event. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m - // d#source-1) identifies the context in which an event happened. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- - // service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - // The [version of the CloudEvents specification](https://github.com/cloudevents/s - // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp - // ec.md#type) contains a value describing the type of event related to the - // originating occurrence. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. - // md#subject) of the event in the context of the event producer (identified by - // source). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// This document defines semantic conventions for the OpenTracing Shim -const ( - // Parent-child Reference type - // - // Type: Enum - // Required: No - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. 
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// This document defines the attributes used to perform database client calls. -const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // This attribute is used to report the name of the database being accessed. For - // commands that switch the database, this should be set to the target database - // (even if the command fails). - // - // Type: string - // Required: Required, if applicable. - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". In case there are multiple layers that could be considered for database - // name (e.g. Oracle instance name and schema name), the database name to be used - // is the more specific layer (e.g. Oracle schema name). - DBNameKey = attribute.Key("db.name") - // The database statement being executed. - // - // Type: string - // Required: Required if applicable and not explicitly disabled via - // instrumentation configuration. - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // Required: Required, if `db.statement` is not applicable. - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") -) - -// Connection-level attributes for Microsoft SQL Server -const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. 
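A minimal sketch of how the db.* call-level attributes above are attached to a client span, assuming the otel and trace packages from the OpenTelemetry Go API; the function, tracer name, and query are hypothetical:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
        "go.opentelemetry.io/otel/trace"
    )

    // queryUsers (hypothetical) starts a CLIENT span carrying the db.*
    // attributes for a PostgreSQL SELECT against the "customers" database.
    func queryUsers(ctx context.Context) {
        tracer := otel.Tracer("example")
        ctx, span := tracer.Start(ctx, "SELECT customers.users",
            trace.WithSpanKind(trace.SpanKindClient),
            trace.WithAttributes(
                semconv.DBSystemPostgreSQL,
                semconv.DBNameKey.String("customers"),
                semconv.DBStatementKey.String("SELECT * FROM users WHERE id = $1"),
                semconv.DBOperationKey.String("SELECT"),
            ))
        defer span.End()
        _ = ctx // execute the query here
    }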
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// Call-level attributes for Cassandra -const ( - // The fetch size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // Required: No - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. - // - // Type: boolean - // Required: No - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// Call-level attributes for Redis -const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. - // - // Type: int - // Required: Required, if other than the default database (`0`). - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// Call-level attributes for MongoDB -const ( - // The collection being accessed within the database stated in `db.name`. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// Call-level attributes for SQL databases -const ( - // The name of the primary table that the operation is acting upon, including the - // database name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// This document defines the attributes used to report a single exception associated with a span. -const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. 
The representation is to be determined and documented by each language - // SIG. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - // SHOULD be set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // Required: No - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of a span, - // if that span is ended while the exception is still logically "in flight". - // This may be actually "in flight" in some languages (e.g. if the exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most languages. - - // It is usually not possible to determine at the point where an exception is - // thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending the span, - // as done in the [example above](#recording-an-exception). - - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. -const ( - // Type of the trigger which caused this function execution. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: For the server/consumer span on the incoming side, - // `faas.trigger` MUST be set. - - // Clients invoking FaaS instances usually cannot set `faas.trigger`, - // since they would typically need to look in the payload to determine - // the event type. If clients set it, it should be the same as the - // trigger that corresponding incoming would have (i.e., this has - // nothing to do with the underlying transport used to make the API - // call to invoke the lambda, which is often HTTP). - FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. 
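The exception.* attributes above are recorded as a span event rather than span attributes. A sketch under that reading; the helper name is hypothetical, and `escaped` mirrors the exception.escaped note (whether the error is still "in flight" when the span ends):

    package example

    import (
        "fmt"

        semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
        "go.opentelemetry.io/otel/trace"
    )

    // recordException (hypothetical) attaches an "exception" event with the
    // exception.* attributes defined above.
    func recordException(span trace.Span, err error, escaped bool) {
        span.AddEvent("exception", trace.WithAttributes(
            semconv.ExceptionTypeKey.String(fmt.Sprintf("%T", err)), // dynamic type
            semconv.ExceptionMessageKey.String(err.Error()),
            semconv.ExceptionEscapedKey.Bool(escaped),
        ))
    }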
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. -const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. - // - // Type: Enum - // Required: Always - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// Contains additional attributes for incoming FaaS spans. -const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). 
- // - // Type: boolean - // Required: No - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// Contains additional attributes for outgoing FaaS spans. -const ( - // The name of the invoked function. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. - // - // Type: Enum - // Required: Always - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. - // - // Type: string - // Required: For some cloud providers, like AWS or GCP, the region in which a - // function is hosted is essential to uniquely identify the function and also part - // of its endpoint. Since it's part of the endpoint being called, the region is - // always known to clients. In these cases, `faas.invoked_region` MUST be set - // accordingly. If the region is unknown to the client or not required for - // identifying the invoked function, setting `faas.invoked_region` is optional. - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// These attributes may be used for any network related operation. -const ( - // Transport protocol used. See note below. - // - // Type: Enum - // Required: No - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - // Remote address of the peer (dotted decimal for IPv4 or - // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - // - // Type: string - // Required: No - // Stability: stable - // Examples: '127.0.0.1' - NetPeerIPKey = attribute.Key("net.peer.ip") - // Remote port number. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - // Remote hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra - // DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '192.168.0.1' - NetHostIPKey = attribute.Key("net.host.ip") - // Like `net.peer.port` but for the host port. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 35555 - NetHostPortKey = attribute.Key("net.host.port") - // Local hostname or similar, see note below. 
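For the outgoing-FaaS attributes above, a short sketch of the attribute set a client would put on its span when invoking an AWS Lambda function; the helper name and values are hypothetical:

    package example

    import (
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
    )

    // lambdaInvokeAttrs (hypothetical) describes one outgoing invocation per
    // the faas.invoked_* conventions; region is required for AWS since it is
    // part of the endpoint being called.
    func lambdaInvokeAttrs() []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.FaaSInvokedNameKey.String("my-function"),
            semconv.FaaSInvokedProviderAWS,
            semconv.FaaSInvokedRegionKey.String("eu-central-1"),
        }
    }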
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - // The internet connection type currently being used by the host. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - // This describes more details regarding the connection.type. It may be the type - // of cell technology connection, but it could be used for describing details - // about a wifi connection. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - // The name of the mobile carrier. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - // The mobile carrier country code. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - // The mobile carrier network code. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - // The ISO 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Another IP-based protocol - NetTransportIP = NetTransportKey.String("ip") - // Unix Domain socket. See below - NetTransportUnix = NetTransportKey.String("unix") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. 
A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// Operations that access some remote service. -const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// These attributes may be used for any operation with an authenticated and/or authorized enduser. -const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// These attributes may be used for any operation to store information about a thread that started a span. -const ( - // Current "managed" thread ID (as opposed to OS thread ID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - // Current thread name. 
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// These attributes allow reporting this unit of code and therefore provide more context about the span.
-const (
- // The method or function name, or equivalent (usually rightmost part of the code
- // unit's name).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
- // The "namespace" within which `code.function` is defined. Usually the qualified
- // class or module name, such that `code.namespace` + some separator +
- // `code.function` form a unique identifier for the code unit.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
- // The source code file name that identifies the code unit as uniquely as possible
- // (preferably an absolute file path).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
- // The line number in `code.filepath` best representing the operation. It SHOULD
- // point within the code unit named in `code.function`.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-)
-
-// This document defines semantic conventions for HTTP client and server Spans.
-const (
- // HTTP request method.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
- // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
- // Usually the fragment is not transmitted over HTTP, but if it is known, it
- // should be included nevertheless.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the attribute's
- // value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
- // The full request target as passed in an HTTP request line or equivalent.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/path/12314/?q=ddds#123'
- HTTPTargetKey = attribute.Key("http.target")
- // The value of the [HTTP host
- // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
- // should also be reported, see note.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'www.example.org'
- // Note: When the header is present but empty the attribute SHOULD be set to the
- // empty string. Note that this is a valid situation that is expected in certain
- // cases, according to the aforementioned [section of RFC
- // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
- // set the attribute MUST NOT be set.
- HTTPHostKey = attribute.Key("http.host")
- // The URI scheme identifying the used protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
- // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // Required: If and only if one was received/sent.
- // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User- - // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the - // client. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the uncompressed request payload body after transport decoding. Not - // set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") - // The ordinal number of request re-sending attempt. - // - // Type: int - // Required: If and only if a request was retried. - // Stability: stable - // Examples: 3 - HTTPRetryCountKey = attribute.Key("http.retry_count") -) - -var ( - // HTTP/1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic Convention for HTTP Server -const ( - // The primary server name of the matched virtual host. This should be obtained - // via configuration. If no such configuration can be obtained, this attribute - // MUST NOT be set ( `net.host.name` should be used instead). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `http.url` is usually not readily available on the server side but would - // have to be assembled in a cumbersome and sometimes lossy process from other - // information (see e.g. open-telemetry/opentelemetry-python/pull/148). 
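A sketch of tagging an HTTP client span with the http.* attributes above once the response is in; assumes the standard library's net/http and the trace API, with a hypothetical helper name. Per the note on http.url, the URL must not carry credentials:

    package example

    import (
        "net/http"

        semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
        "go.opentelemetry.io/otel/trace"
    )

    // tagHTTPClientSpan (hypothetical) records method, URL, and status code
    // for a completed client request.
    func tagHTTPClientSpan(span trace.Span, resp *http.Response) {
        span.SetAttributes(
            semconv.HTTPMethodKey.String(resp.Request.Method),
            semconv.HTTPURLKey.String(resp.Request.URL.String()),
            semconv.HTTPStatusCodeKey.Int(resp.StatusCode),
        )
    }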
It is thus - // preferred to supply the raw data that is available. - HTTPServerNameKey = attribute.Key("http.server_name") - // The matched route (path template). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/users/:userID?' - HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.peer.ip`, which would - // identify the network-level peer, which may be a proxy. - - // This attribute should be set when a source of information different - // from the one used for `net.peer.ip`, is available even if that other - // source just confirms the same value as `net.peer.ip`. - // Rationale: For `net.peer.ip`, one typically does not know if it - // comes from a proxy, reverse proxy, or the actual client. Setting - // `http.client_ip` when it's the same as `net.peer.ip` means that - // one is at least somewhat confident that the address is not that of - // the closest proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// Attributes that exist for multiple DynamoDB request types. -const ( - // The keys in the `RequestItems` object field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. 
- // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// DynamoDB.CreateTable -const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// DynamoDB.ListTables -const ( - // The value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The the number of items in the `TableNames` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// DynamoDB.Query -const ( - // The value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// DynamoDB.Scan -const ( - // The value of the `Segment` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// DynamoDB.UpdateTable -const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` - // request field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// This document defines the attributes used in messaging systems. -const ( - // A string identifying the messaging system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - // The message destination name. This might be equal to the span name but is - // required nevertheless. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - MessagingDestinationKey = attribute.Key("messaging.destination") - // The kind of message destination - // - // Type: Enum - // Required: Required only if the message destination is either a `queue` or - // `topic`. - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") - // A boolean that is true if the message destination is temporary. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") - // The name of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AMQP', 'MQTT' - MessagingProtocolKey = attribute.Key("messaging.protocol") - // The version of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.9.1' - MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") - // Connection string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tibjmsnaming://localhost:7222', - // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' - MessagingURLKey = attribute.Key("messaging.url") - // A value used by the messaging system as an identifier for the message, - // represented as a string. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message_id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MyConversationID' - MessagingConversationIDKey = attribute.Key("messaging.conversation_id") - // The (uncompressed) size of the message payload in bytes. Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") - // The compressed size of the message payload in bytes. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// Semantic convention for a consumer of messages received from a messaging system -const ( - // A string identifying the kind of message consumption as defined in the - // [Operation names](#operation-names) section above. If the operation is "send", - // this attribute MUST NOT be set, since the operation can be inferred from the - // span kind in that case. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingOperationKey = attribute.Key("messaging.operation") - // The identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are - // present, or only `messaging.kafka.consumer_group`. For brokers, such as - // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the - // message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") -) - -var ( - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Attributes for RabbitMQ -const ( - // RabbitMQ message routing key. - // - // Type: string - // Required: Unless it is empty. - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") -) - -// Attributes for Apache Kafka -const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message_id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") - // Name of the Kafka Consumer Group that is handling the message. 
Only applies to - // consumers, not producers. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") - // Client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2 - MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") - // A boolean that is true if the message is a tombstone. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") -) - -// Attributes for Apache RocketMQ -const ( - // Namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - // Name of the RocketMQ producer/consumer group that is handling the message. The - // client type is identified by the SpanKind. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - // The unique identifier for each client. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - // Type of message. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") - // The secondary classifier of message besides topic. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") - // Key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") - // Model of message consumption. This only applies to consumer spans. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// This document defines semantic conventions for remote procedure calls. -const ( - // A string identifying the remoting system. 
See below for a list of well-known - // identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - // The full (logical) name of the service being called, including its package - // name, if applicable. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). - RPCServiceKey = attribute.Key("rpc.service") - // The name of the (logical) method being called, must be equal to the $method - // part in the span name. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the latter - // (e.g., method actually executing the call on the server side, RPC client stub - // method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") -) - -// Tech-specific attributes for gRPC. -const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. - // - // Type: Enum - // Required: Always - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
-const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. - // - // Type: string - // Required: If missing, it is assumed to be "1.0". - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - // `error.code` property of response if it is an error response. - // - // Type: int - // Required: If missing, response is assumed to be successful. - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - // `error.message` property of response if it is an error response. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPC received/sent message. -const ( - // Whether this is a received or sent message. - // - // Type: Enum - // Required: No - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - // MUST be calculated as two different counters starting from `1` one for sent - // messages and one for received message. - // - // Type: int - // Required: No - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - // Compressed size of the message in bytes. - // - // Type: int - // Required: No - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - // Uncompressed size of the message in bytes. - // - // Type: int - // Required: No - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go new file mode 100644 index 0000000000..fc43808fe4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httpconv provides OpenTelemetry HTTP semantic conventions for +// tracing telemetry. 
+package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/semconv/internal/v2"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+var (
+	nc = &internal.NetConv{
+		NetHostNameKey:     semconv.NetHostNameKey,
+		NetHostPortKey:     semconv.NetHostPortKey,
+		NetPeerNameKey:     semconv.NetPeerNameKey,
+		NetPeerPortKey:     semconv.NetPeerPortKey,
+		NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+		NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+		NetTransportOther:  semconv.NetTransportOther,
+		NetTransportTCP:    semconv.NetTransportTCP,
+		NetTransportUDP:    semconv.NetTransportUDP,
+		NetTransportInProc: semconv.NetTransportInProc,
+	}
+
+	hc = &internal.HTTPConv{
+		NetConv: nc,
+
+		EnduserIDKey:                 semconv.EnduserIDKey,
+		HTTPClientIPKey:              semconv.HTTPClientIPKey,
+		HTTPFlavorKey:                semconv.HTTPFlavorKey,
+		HTTPMethodKey:                semconv.HTTPMethodKey,
+		HTTPRequestContentLengthKey:  semconv.HTTPRequestContentLengthKey,
+		HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+		HTTPRouteKey:                 semconv.HTTPRouteKey,
+		HTTPSchemeHTTP:               semconv.HTTPSchemeHTTP,
+		HTTPSchemeHTTPS:              semconv.HTTPSchemeHTTPS,
+		HTTPStatusCodeKey:            semconv.HTTPStatusCodeKey,
+		HTTPTargetKey:                semconv.HTTPTargetKey,
+		HTTPURLKey:                   semconv.HTTPURLKey,
+		HTTPUserAgentKey:             semconv.HTTPUserAgentKey,
+	}
+)
+
+// ClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated by combining these
+// with attributes from the request contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func ClientResponse(resp *http.Response) []attribute.KeyValue {
+	return hc.ClientResponse(resp)
+}
+
+// ClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func ClientRequest(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequest(req)
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func ClientStatus(code int) (codes.Code, string) {
+	return hc.ClientStatus(code)
+}
+
+// ServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example, this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
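As a usage sketch (not part of the vendored diff itself): the client-side helpers above might be wired into an instrumented request roughly as follows. The tracer name, span name, and tracedGet wrapper are illustrative assumptions; httpconv.ClientRequest, httpconv.ClientResponse, and httpconv.ClientStatus are the functions defined above.

	import (
		"context"
		"net/http"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
		"go.opentelemetry.io/otel/trace"
	)

	// tracedGet issues a GET and annotates a client span with the semconv
	// helpers. "example-client" is a placeholder instrumentation name; a
	// tracer provider is assumed to be configured elsewhere.
	func tracedGet(ctx context.Context, url string) (*http.Response, error) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		ctx, span := otel.Tracer("example-client").Start(ctx, "HTTP GET",
			trace.WithSpanKind(trace.SpanKindClient),
			trace.WithAttributes(httpconv.ClientRequest(req)...))
		defer span.End()

		resp, err := http.DefaultClient.Do(req.WithContext(ctx))
		if err != nil {
			return nil, err
		}
		span.SetAttributes(httpconv.ClientResponse(resp)...)
		span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
		return resp, nil
	}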
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if their related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequest(server, req)
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func ServerStatus(code int) (codes.Code, string) {
+	return hc.ServerStatus(code)
+}
+
+// RequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func RequestHeader(h http.Header) []attribute.KeyValue {
+	return hc.RequestHeader(h)
+}
+
+// ResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func ResponseHeader(h http.Header) []attribute.KeyValue {
+	return hc.ResponseHeader(h)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 0000000000..67d1d4c44d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1209 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It represents the HTTP request method.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the [HTTP
+	// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTP Server spans attributes
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+	// if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have the same `event.name`,
+	// yet be unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It represents the transport protocol used. See
+	// note below.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	NetTransportKey = attribute.Key("net.transport")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions. It represents the application
+	// layer protocol used. The value SHOULD be normalized to lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions. It represents the version
+	// of the application layer protocol used. See note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `net.protocol.version` refers to the version of the protocol used
+	// and might be different from the protocol client's version. If the HTTP
+	// client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+	// this attribute should be set to `1.1`.
+	NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+	// NetSockPeerNameKey is the attribute Key conforming to the
+	// "net.sock.peer.name" semantic conventions. It represents the remote
+	// socket peer name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If available and different from
+	// `net.peer.name` and if `net.sock.peer.addr` is set.)
+	// Stability: stable
+	// Examples: 'proxy.example.com'
+	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+	// NetSockPeerAddrKey is the attribute Key conforming to the
+	// "net.sock.peer.addr" semantic conventions. It represents the remote
+	// socket peer address: IPv4 or IPv6 for internet protocols, path for local
+	// communication,
+	// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '127.0.0.1', '/tmp/mysql.sock'
+	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+	// NetSockPeerPortKey is the attribute Key conforming to the
+	// "net.sock.peer.port" semantic conventions. It represents the remote
+	// socket peer port.
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If defined for the address family and if
+	// different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. 
It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. 
A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. 
It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. 
It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. 
It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. 
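+ //
+ // A non-normative usage sketch: the high-cardinality source name can be
+ // recorded next to its low-cardinality template (here `span` is assumed
+ // to be an active trace.Span, and the values are invented):
+ //
+ //	span.SetAttributes(
+ //		MessagingSourceName("/customers/42"),
+ //		MessagingSourceTemplate("/customers/{customerID}"),
+ //	)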
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // Kafka message key: message keys are used for grouping alike messages to
+ // ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique.
If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the Kafka
+// message key: message keys are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
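+//
+// A minimal usage sketch (illustrative only; `span` is assumed to be an
+// active trace.Span and the key value is invented):
+//
+//	span.SetAttributes(MessagingKafkaMessageKey("myKey"))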
+func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. 
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message,
+ // which determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of a message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
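+//
+// Illustrative only, assuming `span` is an active trace.Span and reusing
+// the spec's example value:
+//
+//	span.SetAttributes(MessagingRocketmqMessageTag("tagA"))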
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 181fcc9c52..359c5a6962 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -16,5 +16,5 @@ // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.12.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" +// as of the v1.20.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go new file mode 100644 index 0000000000..8ac9350d2b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. 
It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the variant,
+ // which SHOULD be a semantic identifier for a value. If one is
+ // unavailable, a stringified version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the variant,
+// which SHOULD be a semantic identifier for a value. If one is unavailable, a
+// stringified version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`, one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions.
It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be calculated
+// as two different counters starting from `1`, one for sent messages and one
+// for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents a flag that
+ // SHOULD be set to true if the exception event is recorded at a point
+ // where it is known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents a flag that SHOULD
+// be set to true if the exception event is recorded at a point where it is
+// known that the exception is escaping the scope of the span.
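+//
+// A sketch of one possible call site (non-normative; `span`, `err` and an
+// import of go.opentelemetry.io/otel/trace are assumed):
+//
+//	span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))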
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go similarity index 91% rename from vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index d689270943..09ff4dfdbf 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go new file mode 100644 index 0000000000..342aede95f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go new file mode 100644 index 0000000000..a2b906742a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -0,0 +1,2071 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. 
It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. 
It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone the resource is running in. Cloud regions often have
+ // multiple, isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
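+ //
+ // As a non-normative illustration, a resource on AWS Lambda could pair
+ // this attribute with `cloud.provider` using the enum values defined
+ // below:
+ //
+ //	attrs := []attribute.KeyValue{CloudProviderAWS, CloudPlatformAWSLambda}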
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// 
"cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) +// on Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. 
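+//
+// A hedged sketch of attaching these attributes to an SDK resource
+// (assumes go.opentelemetry.io/otel/sdk/resource and this package's
+// SchemaURL constant; the log group name is the spec example):
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		AWSLogGroupNames("/aws/lambda/my-function"),
+//	)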
+const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). 
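+//
+// A minimal illustrative call, reusing the spec's example ARN:
+//
+//	AWSLogStreamARNs("arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b")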
+func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. 
It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. 
It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576).
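+ // For example (editorial note), an `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` of
+ // 128 (MiB) is recorded as 128 * 1,048,576 = 134,217,728:
+ //
+ //    attr := semconv.FaaSMaxMemory(128 * 1024 * 1024)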
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID. For Cloud, this + // value is from the provider.
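+ //
+ // An editorial usage sketch (value from the Examples line below):
+ //
+ //    attr := semconv.HostImageID("ami-07b06b442921831e5")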
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions.
It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). 
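+ //
+ // As an editorial sketch (assuming the SDK's resource package,
+ // go.opentelemetry.io/otel/sdk/resource), K8s attributes are typically
+ // attached together to a single Resource:
+ //
+ //    res := resource.NewWithAttributes(semconv.SchemaURL,
+ //        semconv.K8SNamespaceName("default"),
+ //        semconv.K8SPodName("opentelemetry-pod-autoconf"),
+ //        semconv.K8SContainerName("redis"),
+ //    )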
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. 
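+ //
+ // Editorial sketch pairing the UID and name helpers (values from the
+ // Examples entries):
+ //
+ //    attrs := []attribute.KeyValue{
+ //        semconv.K8SDeploymentUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+ //        semconv.K8SDeploymentName("opentelemetry"),
+ //    }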
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
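+ //
+ // An editorial sketch combining the OS attributes with their documented
+ // example values:
+ //
+ //    attrs := []attribute.KeyValue{
+ //        semconv.OSTypeLinux,
+ //        semconv.OSName("Ubuntu"),
+ //        semconv.OSVersion("18.04.1"),
+ //    }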
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions.
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment.
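+ //
+ // Editorial sketch using the documented example values:
+ //
+ //    attrs := []attribute.KeyValue{
+ //        semconv.ProcessRuntimeName("OpenJDK Runtime Environment"),
+ //        semconv.ProcessRuntimeVersion("14.0.2"),
+ //        semconv.ProcessRuntimeDescription("Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0"),
+ //    }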
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// A service instance. +const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. 
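+ //
+ // As an editorial sketch (assuming go.opentelemetry.io/otel/sdk/resource),
+ // the service identity is typically declared on one Resource:
+ //
+ //    res := resource.NewWithAttributes(semconv.SchemaURL,
+ //        semconv.ServiceName("shoppingcart"),
+ //        semconv.ServiceNamespace("Shop"),
+ //        semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+ //    )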
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
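+ //
+ // Editorial sketch; the language is reported via the enum values defined
+ // below:
+ //
+ //    attrs := []attribute.KeyValue{
+ //        semconv.TelemetrySDKName("opentelemetry"),
+ //        semconv.TelemetrySDKLanguageGo,
+ //        semconv.TelemetrySDKVersion("1.2.3"),
+ //    }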
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). 
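+ //
+ // A brief editorial sketch with the documented example values:
+ //
+ //    attrs := []attribute.KeyValue{
+ //        semconv.WebEngineName("WildFly"),
+ //        semconv.WebEngineVersion("21.0.0"),
+ //    }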
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It is deprecated; + // use the `otel.scope.name` attribute instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It is + // deprecated; use the `otel.scope.version` attribute instead.
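+ //
+ // Editorial note: new instrumentation should emit the scope attribute
+ // instead, e.g.
+ //
+ //    attr := semconv.OTelScopeVersion("1.0.0") // rather than OTelLibraryVersion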
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It is deprecated; use +// the `otel.scope.name` attribute instead. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It is deprecated; +// use the `otel.scope.version` attribute instead. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go similarity index 86% rename from vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index 2f2a019e43..e449e5c3b9 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/<version> -const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go new file mode 100644 index 0000000000..8517741485 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -0,0 +1,2610 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it.
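+ //
+ // Editorial sketch (values from the Examples entries in this block):
+ //
+ //    attrs := []attribute.KeyValue{
+ //        semconv.ExceptionType("OSError"),
+ //        semconv.ExceptionMessage("Division by zero"),
+ //    }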
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. 
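+//
+// A minimal usage sketch (an illustration, not part of the generated API):
+// assuming a trace.Span named span from go.opentelemetry.io/otel/trace, and
+// an illustrative event name "log":
+//
+//	span.AddEvent("log", trace.WithAttributes(
+//		LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
+//	))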
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` request, if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, if applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
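+	//
+	// A minimal usage sketch (an illustration, not part of the generated
+	// API), assuming a trace.Span named span for a CloudEvents operation;
+	// the values are the documented examples:
+	//
+	//	span.SetAttributes(
+	//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+	//		CloudeventsEventSource("https://github.com/cloudevents"),
+	//		CloudeventsEventType("com.github.pull_request.opened"),
+	//	)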
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
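+	//
+	// A minimal usage sketch (an illustration, not part of the generated
+	// API): the enum values declared below are attached directly, e.g.
+	//
+	//	span.SetAttributes(OpentracingRefTypeChildOf)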
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable.)
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (Should be collected by default only if
+	// there is sanitization that excludes sensitive information.)
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions.
It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = 
DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+	// Cloud Spanner
+	DBSystemSpanner = DBSystemKey.String("spanner")
+	// Trino
+	DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// connecting to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+	// longer required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary table that the operation is acting upon, including the keyspace
+	// name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the db.sql.table attribute but references cassandra
+	// rather than sql. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether or
+// not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
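+//
+// A minimal usage sketch (an illustration, not part of the generated API),
+// assuming a client span for a Cassandra query; the ID is the documented
+// example value:
+//
+//	span.SetAttributes(
+//		DBSystemCassandra,
+//		DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
+//	)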
+func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. 
It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: ConditionallyRequired (when available) + // Stability: stable + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes the type of
+	// the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions.
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
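+//
+// A minimal usage sketch (an illustration, not part of the generated API),
+// assuming the incoming invocation span and a coldStart bool computed by
+// the function wrapper:
+//
+//	span.SetAttributes(FaaSColdstart(coldStart))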
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
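+	//
+	// A minimal usage sketch (an illustration, not part of the generated
+	// API), assuming a client span and the documented example value:
+	//
+	//	span.SetAttributes(PeerService("AuthTokenCache"))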
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
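+//
+// A minimal usage sketch (an illustration, not part of the generated API),
+// assuming values extracted from a verified access token; both values are
+// the documented examples:
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserScope("read:message, write:files"),
+//	)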
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
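+	//
+	// A minimal usage sketch (an illustration, not part of the generated
+	// API), assuming the instrumentation knows its own source location; the
+	// values are the documented examples:
+	//
+	//	span.SetAttributes(
+	//		CodeFunction("serveRequest"),
+	//		CodeNamespace("com.example.MyHTTPService"),
+	//		CodeLineNumber(42),
+	//	)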
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. 
It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // an HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/users/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr` is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in an +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
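+//
+// More generally, a hedged sketch of how the DynamoDB attribute
+// constructors in this section combine on a span (the span value and the
+// chosen parameters are illustrative, not prescribed by the conventions):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users", "Cats"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//	)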
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter.
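+//
+// Illustrative sketch (hypothetical span; values taken from the Examples
+// above): a ListTables span could be annotated as
+//
+//	span.SetAttributes(
+//		AWSDynamoDBExclusiveStartTable("Users"),
+//		AWSDynamoDBTableCount(20),
+//	)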
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
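+//
+// Hedged example (hypothetical span; values drawn from the Examples above):
+// one segment of a parallel Scan might record
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(10),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)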
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
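+//
+// Illustrative sketch (hypothetical span and object names): a copy-object
+// span might combine the S3 attributes as
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3CopySource("source-bucket/someFile.yml"),
+//	)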
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index c7aba1c3f4..7b2993a1fe 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.22.0" + return "1.24.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index a9cfb80ae5..1b556e6782 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,19 +14,25 @@ module-sets: stable-v1: - version: v1.22.0 + version: v1.24.0 modules: - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/dice - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - go.opentelemetry.io/otel/exporters/zipkin - go.opentelemetry.io/otel/metric @@ -34,16 +40,14 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.45.0 + version: v0.46.0 modules: - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + experimental-logs: + version: v0.0.1-alpha + modules: + - go.opentelemetry.io/otel/log experimental-schema: version: v0.0.7 modules: diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 90a2c3d6dc..09f6a49b80 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -393,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the // TokenSource returned by ReuseTokenSource, except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f6e815e6bf..f0b7f3200f 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1772,6 +1772,8 @@ func parseTarget(target string) (resolver.Target, error) { return resolver.Target{URL: *u}, nil } +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. 
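+//
+// Illustrative examples of that rule (not an exhaustive specification): an
+// authority such as "server.example.com:443" contains only valid characters
+// and passes through unchanged, while a character outside the allowed set,
+// e.g. a space, would be percent-encoded as "%20" using the upperhex
+// alphabet below.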
func encodeAuthority(authority string) string { const upperhex = "0123456789ABCDEF" diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index b7b8fec180..d3796c256e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -28,6 +28,7 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" "time" @@ -362,8 +363,12 @@ func (s *Stream) SendCompress() string { // ClientAdvertisedCompressors returns the compressor names advertised by the // client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values } // Done returns a channel which is closed when it receives the final status diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index adf89dd9cf..d72f21c1b3 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -168,6 +168,9 @@ type BuildOptions struct { // field. In most cases though, it is not appropriate, and this field may // be ignored. Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string } // An Endpoint is one network endpoint, or server, which may have multiple diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c79bab1214..f845ac9589 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -75,6 +75,7 @@ func (ccr *ccResolverWrapper) start() error { DialCreds: ccr.cc.dopts.copts.TransportCredentials, CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, } var err error ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index d17ede0fa4..82493d237b 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -744,17 +744,19 @@ type payloadInfo struct { uncompressedBytes []byte } -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Calling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed.
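+//
+// A minimal caller sketch (the parser, stream, and compressor values are
+// illustrative): the returned buffer must not be used after cancel runs.
+//
+//	buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, nil, comp)
+//	if err != nil {
+//		return err
+//	}
+//	defer cancel() // releases the buffer back to the pool
+//	// ... consume buf here, before cancel executes ...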
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, +) (uncompressedBuf []byte, cancel func(), err error) { + pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.compressedLength = len(buf) + return nil, nil, err } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() + return nil, nil, st.Err() } var size int @@ -762,21 +764,35 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) + uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + size = len(uncompressedBuf) } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } + } else { + uncompressedBuf = compressedBuf } - return buf, nil + + if payInfo != nil { + payInfo.compressedLength = len(compressedBuf) + payInfo.uncompressedBytes = uncompressedBuf + + cancel = func() {} + } else { + cancel = func() { + p.recvBufferPool.Put(&compressedBuf) + } + } + + return uncompressedBuf, cancel, nil } // Using compressor, decompress d, returning data and size. @@ -796,6 +812,9 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // size is used as an estimate to size the buffer, but we // will read more data if available. // +MinRead so ReadFrom will not reallocate if size is correct. + // + // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // we can also utilize the recv buffer pool here. buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return buf.Bytes(), int(bytesRead), err @@ -811,18 +830,15 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
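The recvAndDecompress rewrite above pairs the returned buffer with a cancel func, so the caller decides when the receive buffer goes back to the pool instead of recvAndDecompress inferring it from payInfo. A rough sketch of that return-a-release-func pattern, under stated assumptions: sharedPool and readMsg are illustrative stand-ins rather than grpc internals, and as in the real code the buffer must not be used after the release func runs:

```go
package main

import (
	"fmt"
	"sync"
)

var sharedPool = sync.Pool{New: func() any { b := make([]byte, 0, 1024); return &b }}

// readMsg hands the caller a message buffer plus a release func that
// returns the underlying storage to the pool exactly once.
func readMsg() ([]byte, func()) {
	bufp := sharedPool.Get().(*[]byte)
	buf := append((*bufp)[:0], "payload"...)
	release := func() { sharedPool.Put(bufp) }
	return buf, release
}

func main() {
	buf, release := readMsg()
	defer release() // release as soon as buf is no longer needed
	fmt.Println(string(buf))
}
```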
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } + defer cancel() + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } - if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) - } return nil } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0bf5c78b0d..a6a11704b3 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1342,7 +1342,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + + d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1353,6 +1354,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor t.IncrMsgRecv() } df := func(v any) error { + defer cancel() + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -2117,7 +2120,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil + return stream.ClientAdvertisedCompressors(), nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. @@ -2157,7 +2160,7 @@ func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. -func validateSendCompressor(name, clientCompressors string) error { +func validateSendCompressor(name string, clientCompressors []string) error { if name == encoding.Identity { return nil } @@ -2166,7 +2169,7 @@ func validateSendCompressor(name, clientCompressors string) error { return fmt.Errorf("compressor not registered %q", name) } - for _, c := range strings.Split(clientCompressors, ",") { + for _, c := range clientCompressors { if c == name { return nil // found match } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index df85a021ad..46ad8113ff 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
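With the compressor list now arriving pre-split, validateSendCompressor above shrinks to a plain slice membership test rather than re-splitting a comma string on every call. A minimal sketch of that check; supported is a hypothetical helper, not the grpc function:

```go
package main

import "fmt"

// supported reports whether name appears in the client's advertised compressors.
func supported(name string, clientCompressors []string) bool {
	for _, c := range clientCompressors {
		if c == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(supported("gzip", []string{"gzip", "zstd"})) // true
}
```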
-const Version = "1.62.0" +const Version = "1.62.1" diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 7348c50c0c..0000000000 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "1.14.x" - - "tip" - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6f..0000000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE deleted file mode 100644 index 866d74a7ad..0000000000 --- a/vendor/gopkg.in/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index b50c6e8775..0000000000 --- a/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. 
- -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index acf71402cf..0000000000 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,744 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. 
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -var disableLineWrapping = false - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - if disableLineWrapping { - emitter.best_width = -1 - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. 
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. 
-// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 129bc2a97d..0000000000 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,815 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. 
-// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. 
- out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) - return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index a1c2cc5262..0000000000 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. 
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? 
- tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. 
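The flow-mapping and flow-sequence states above are reached whenever the encoder requests flow style; through the public API that is what the ",flow" struct-tag option does. A small sketch (types and values invented for illustration):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type Point struct {
        // ",flow" asks the emitter for the one-line {x: 1, y: 2} form,
        // driving the flow states above instead of the block states.
        Pos  map[string]int `yaml:"pos,flow"`
        Tags []string       `yaml:"tags,flow"`
    }

    func main() {
        out, err := yaml.Marshal(Point{
            Pos:  map[string]int{"x": 1, "y": 2},
            Tags: []string{"a", "b"},
        })
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // expected:
        // pos: {x: 1, y: 2}
        // tags: [a, b]
    }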
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. 
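yaml_emitter_check_simple_key above refuses keys longer than 128 bytes (and multiline keys), so such keys fall back to the explicit '?' key form emitted by the block-mapping states. A sketch of how that is expected to surface via yaml.Marshal (the output comment is an assumption read off the code above, not verified against this exact revision):

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/yaml.v2"
    )

    func main() {
        long := strings.Repeat("k", 200) // > 128 bytes, so not a simple key
        out, _ := yaml.Marshal(map[string]int{long: 1})
        fmt.Print(string(out))
        // The emitter should write the key with a leading "? " indicator:
        // ? kkkk...k
        // : 1
    }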
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
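yaml_emitter_select_scalar_style above, together with stringv in encode.go further down in this patch, decides whether a string comes out plain, quoted, or as a block scalar. A sketch with the outputs this logic should produce (noted as comments; expected, not verified against this exact revision):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        for _, s := range []string{
            "plain",      // nothing special: plain style
            "123",        // would resolve to an int, so it gets quoted
            " padded",    // leading space disallows plain: single-quoted
            "two\nlines", // multiline strings become literal block scalars
        } {
            out, _ := yaml.Marshal(map[string]string{"v": s})
            fmt.Print(string(out))
        }
        // expected:
        // v: plain
        // v: "123"
        // v: ' padded'
        // v: |-
        //   two
        //   lines
    }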
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. 
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
- preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = 
false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if 
!put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e11b..0000000000 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - 
"reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. 
- case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 81d05dfe57..0000000000 --- 
a/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
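Per the grammar at the top of parserc.go, a stream is STREAM-START implicit_document? explicit_document* STREAM-END; through the public API, each document in the stream comes out of one Decode call. A minimal sketch:

    package main

    import (
        "fmt"
        "io"
        "strings"

        "gopkg.in/yaml.v2"
    )

    func main() {
        // Two documents in one stream, separated by "---".
        r := strings.NewReader("a: 1\n---\na: 2\n")
        dec := yaml.NewDecoder(r)
        for {
            var doc map[string]int
            if err := dec.Decode(&doc); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Println(doc["a"]) // prints 1, then 2
        }
    }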
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
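At the event level, every DOCUMENT-START produced here begins one decodable document, whether implicit or introduced by '---', and the matching DOCUMENT-END closes it. Through the package's public API that corresponds to one Decode call per document, for example:

    package main

    import (
        "fmt"
        "io"
        "strings"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // The first document is implicit (no leading "---"); the second is
        // explicit. Decode returns io.EOF once STREAM-END is reached.
        dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
        for {
            var doc map[string]int
            if err := dec.Decode(&doc); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Println(doc["a"]) // 1, then 2
        }
    }
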
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
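The loop above that matches tag_handle against parser.tag_directives is plain prefix substitution: a %TAG directive maps a handle to a prefix, and the node's suffix is appended to it. A standalone sketch with an invented resolveTag helper (the !e! directive is illustrative):

    package main

    import "fmt"

    // tagDirective mirrors yaml_tag_directive_t: a handle ("!e!") and the
    // prefix it expands to.
    type tagDirective struct{ handle, prefix string }

    // resolveTag expands handle+suffix using the registered directives, the
    // same lookup the loop over parser.tag_directives performs above.
    func resolveTag(dirs []tagDirective, handle, suffix string) (string, bool) {
        for _, d := range dirs {
            if d.handle == handle {
                return d.prefix + suffix, true
            }
        }
        return "", false // "found undefined tag handle"
    }

    func main() {
        dirs := []tagDirective{
            {"!", "!"},
            {"!!", "tag:yaml.org,2002:"},
            {"!e!", "tag:example.com,2000:app/"},
        }
        full, ok := resolveTag(dirs, "!e!", "foo")
        fmt.Println(full, ok) // tag:example.com,2000:app/foo true
    }
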
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
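A consequence of the empty-scalar branches above: a mapping key with no value still parses, because yaml_parser_process_empty_scalar synthesizes an empty SCALAR event for the missing node. Observable through the public API:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // "a:" produces a KEY token but no VALUE node, so the parser emits
        // an empty-scalar event and the entry decodes as nil.
        var m map[string]interface{}
        if err := yaml.Unmarshal([]byte("a:\nb: 2\n"), &m); err != nil {
            panic(err)
        }
        fmt.Println(m) // map[a:<nil> b:2]
    }
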
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
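The *_entry_mapping_* states above exist for YAML's single-pair mappings inside flow sequences: a KEY token met inside [ ] opens an implicit one-entry mapping. For example:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // [a: b, c: d] is a sequence of two single-pair mappings, not one
        // mapping; each pair goes through the entry-mapping states above.
        var seq []map[string]string
        if err := yaml.Unmarshal([]byte("[a: b, c: d]"), &seq); err != nil {
            panic(err)
        }
        fmt.Println(seq) // [map[a:b] map[c:d]]
    }
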
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index 7c1f5fac3d..0000000000 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
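The constants that follow drive encoding detection: the first two or three bytes are compared against the UTF-16LE, UTF-16BE and UTF-8 byte order marks, with UTF-8 as the fallback when none matches. The check reduces to prefix matching; a standalone sketch (sniff is an invented name):

    package main

    import (
        "bytes"
        "fmt"
    )

    // sniff reports the encoding implied by a leading byte order mark,
    // defaulting to UTF-8 as yaml_parser_determine_encoding does.
    func sniff(b []byte) string {
        switch {
        case bytes.HasPrefix(b, []byte("\xff\xfe")):
            return "UTF-16LE"
        case bytes.HasPrefix(b, []byte("\xfe\xff")):
            return "UTF-16BE"
        default:
            return "UTF-8" // with or without the EF BB BF BOM
        }
    }

    func main() {
        fmt.Println(sniff([]byte("\xff\xfea\x00"))) // UTF-16LE
        fmt.Println(sniff([]byte("key: value\n")))  // UTF-8
    }
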
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
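yaml_parser_update_raw_buffer above uses a standard Go buffering idiom: copy the unread tail to the front, re-slice, then read into the slice's spare capacity. Extracted into a standalone helper (refill is an invented name):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // refill mirrors the raw-buffer refill: slide the unread tail to the
    // front, then read into the spare capacity of the slice.
    func refill(r io.Reader, buf []byte, pos int) ([]byte, error) {
        n := copy(buf, buf[pos:]) // move unread bytes to the front
        buf = buf[:n]
        m, err := r.Read(buf[len(buf):cap(buf)]) // fill the free space
        return buf[:len(buf)+m], err
    }

    func main() {
        r := strings.NewReader("world")
        buf := append(make([]byte, 0, 16), "hello "...)
        buf, err := refill(r, buf, 6) // pretend the first 6 bytes were consumed
        if err != nil && err != io.EOF {
            panic(err)
        }
        fmt.Printf("%q\n", buf) // "world"
    }
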
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
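The surrogate-pair arithmetic above follows the RFC 2781 formula quoted in the comment: U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF). A worked example:

    package main

    import "fmt"

    // decodeSurrogate applies the RFC 2781 formula for a UTF-16 surrogate
    // pair: high surrogate W1 (0xD800-0xDBFF), low surrogate W2 (0xDC00-0xDFFF).
    func decodeSurrogate(w1, w2 uint16) rune {
        return 0x10000 + rune(w1&0x3FF)<<10 + rune(w2&0x3FF)
    }

    func main() {
        // U+1F600 is encoded in UTF-16 as the pair D83D DE00.
        fmt.Printf("%U\n", decodeSurrogate(0xD83D, 0xDE00)) // U+1F600
    }
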
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 4120e0c916..0000000000 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' 
// Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. 
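The resolveMap and first-byte hint table above are why this YAML 1.1 parser types plain scalars that YAML 1.2 would leave alone: "yes" becomes a bool, "~" a null, ".inf" a float. Observable through the public API:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        var v struct {
            A, B interface{}
            C    float64
        }
        in := "a: yes\nb: ~\nc: .inf\n"
        if err := yaml.Unmarshal([]byte(in), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.A, v.B, v.C) // true <nil> +Inf
    }
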
- if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
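The integer branch above strips "_" separators and special-cases the 0b/-0b binary prefixes before calling strconv, so YAML 1.1 spellings like these decode as numbers:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // "1_000" has its underscores removed; "0b1010" is parsed base 2.
        var v struct{ A, B int }
        if err := yaml.Unmarshal([]byte("a: 1_000\nb: 0b1010\n"), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.A, v.B) // 1000 10
    }
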
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 0b9bb6030a..0000000000 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2711 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' 
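parseTimestamp above tries the listed layouts in order after its fast "starts with YYYY-" check; the same probe can be reproduced directly with time.Parse:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // The short-field layouts from allowedTimestampFormats, tried in order.
        layouts := []string{
            "2006-1-2T15:4:5.999999999Z07:00",
            "2006-1-2t15:4:5.999999999Z07:00",
            "2006-1-2 15:4:5.999999999",
            "2006-1-2",
        }
        for _, l := range layouts {
            if t, err := time.Parse(l, "2001-12-14 21:59:43.10"); err == nil {
                fmt.Println(t.UTC()) // 2001-12-14 21:59:43.1 +0000 UTC
                break
            }
        }
    }
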
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? 
a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? 
complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . 
LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. 
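yaml_parser_scan above dequeues by advancing tokens_head instead of re-slicing the queue, which lets tokens_parsed keep counting absolute token numbers. A standalone sketch of that head-index pattern, using a hypothetical tokenQueue type rather than the vendored structs:

    package main

    import "fmt"

    // tokenQueue mirrors the tokens/tokens_head pair: a pop advances the
    // head index, leaving earlier entries in place.
    type tokenQueue struct {
    	tokens []string
    	head   int
    }

    func (q *tokenQueue) push(t string) { q.tokens = append(q.tokens, t) }

    func (q *tokenQueue) pop() (string, bool) {
    	if q.head == len(q.tokens) {
    		return "", false
    	}
    	t := q.tokens[q.head]
    	q.head++
    	return t, true
    }

    func main() {
    	q := &tokenQueue{}
    	q.push("STREAM-START")
    	q.push("STREAM-END")
    	for t, ok := q.pop(); ok; t, ok = q.pop() {
    		fmt.Println(t)
    	}
    }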
- if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? 
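The if-chain above dispatches on the first significant byte. Condensed into a switch, and ignoring the blank-lookahead and flow-level conditions the real scanner applies, the mapping looks roughly like this (a sketch, not the vendored code):

    // classify maps a leading indicator byte to a token name; anything
    // else falls through to plain-scalar scanning.
    func classify(b byte) string {
    	switch b {
    	case '[':
    		return "FLOW-SEQUENCE-START"
    	case '{':
    		return "FLOW-MAPPING-START"
    	case ']':
    		return "FLOW-SEQUENCE-END"
    	case '}':
    		return "FLOW-MAPPING-END"
    	case ',':
    		return "FLOW-ENTRY"
    	case '*':
    		return "ALIAS"
    	case '&':
    		return "ANCHOR"
    	case '!':
    		return "TAG"
    	case '|':
    		return "SCALAR(literal)"
    	case '>':
    		return "SCALAR(folded)"
    	case '\'', '"':
    		return "SCALAR(quoted)"
    	default:
    		return "SCALAR(plain) or error"
    	}
    }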
- if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. 
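The 1024-character window and single-line restriction quoted above from the 1.2 specification reduce to a two-clause test. A minimal sketch with hypothetical mark parameters:

    // simpleKeyStillValid reports whether a potential simple key that
    // started at (keyLine, keyIndex) may still be completed by a ':' at
    // the current (curLine, curIndex): same line, at most 1024 characters
    // beyond the start of the key.
    func simpleKeyStillValid(keyLine, keyIndex, curLine, curIndex int) bool {
    	return keyLine == curLine && keyIndex+1024 >= curIndex
    }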
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. 
- token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. 
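Taken together, yaml_parser_roll_indent and yaml_parser_unroll_indent treat indentation like Python's INDENT/DEDENT stack: push on an increase, then pop (emitting one BLOCK-END per level) until the stack returns to the current column. A standalone sketch of the unroll half, assuming a caller-supplied emit callback:

    // unroll pops indentation levels greater than column, emitting one
    // BLOCK-END per popped level, and returns the updated stack and indent.
    func unroll(indents []int, indent, column int, emit func(string)) ([]int, int) {
    	for indent > column {
    		emit("BLOCK-END")
    		indent = indents[len(indents)-1]
    		indents = indents[:len(indents)-1]
    	}
    	return indents, indent
    }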
- start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. 
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '-'.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the BLOCK-ENTRY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_BLOCK_ENTRY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the KEY token.
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
-	// In the block context, additional checks are required.
-	if parser.flow_level == 0 {
-		// Check if we are allowed to start a new key (not necessarily simple).
-		if !parser.simple_key_allowed {
-			return yaml_parser_set_scanner_error(parser, "", parser.mark,
-				"mapping keys are not allowed in this context")
-		}
-		// Add the BLOCK-MAPPING-START token if needed.
-		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-			return false
-		}
-	}
-
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '?' in the block context.
-	parser.simple_key_allowed = parser.flow_level == 0
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the KEY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_KEY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
-	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
-	// Have we found a simple key?
-	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
-		return false
-
-	} else if valid {
-
-		// Create the KEY token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        yaml_KEY_TOKEN,
-			start_mark: simple_key.mark,
-			end_mark:   simple_key.mark,
-		}
-		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
-		// In the block context, we may need to add the BLOCK-MAPPING-START token.
-		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
-			simple_key.token_number,
-			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
-			return false
-		}
-
-		// Remove the simple key.
-		simple_key.possible = false
-		delete(parser.simple_keys_by_tok, simple_key.token_number)
-
-		// A simple key cannot follow another simple key.
-		parser.simple_key_allowed = false
-
-	} else {
-		// The ':' indicator follows a complex key.
-
-		// In the block context, extra checks are required.
-		if parser.flow_level == 0 {
-
-			// Check if we are allowed to start a complex value.
-			if !parser.simple_key_allowed {
-				return yaml_parser_set_scanner_error(parser, "", parser.mark,
-					"mapping values are not allowed in this context")
-			}
-
-			// Add the BLOCK-MAPPING-START token if needed.
-			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-				return false
-			}
-		}
-
-		// Simple keys after ':' are allowed in the block context.
-		parser.simple_key_allowed = parser.flow_level == 0
-	}
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the VALUE token and append it to the queue.
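The subtle move in yaml_parser_fetch_value is that the KEY token is inserted retroactively: the saved simple key carries an absolute token number, and subtracting tokens_parsed turns it into a position in the live queue. A sketch of that arithmetic with hypothetical helpers:

    // queuePos converts an absolute token number into an index usable on
    // the current queue, given how many tokens were already handed out.
    func queuePos(tokenNumber, tokensParsed int) int {
    	return tokenNumber - tokensParsed
    }

    // insertAt places tok at position pos in q (append when pos == len(q)).
    func insertAt(q []string, pos int, tok string) []string {
    	q = append(q, "")
    	copy(q[pos+1:], q[pos:])
    	q[pos] = tok
    	return q
    }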
- token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. 
-// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. 
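With max_number_length set to 2 and an int8 accumulator, each %YAML version component is limited to two digits. A standalone equivalent of the digit loop above (a hypothetical scanVersionNumber operating on a string instead of the parser buffer, with "fmt" imported):

    // scanVersionNumber consumes at most two leading decimal digits,
    // mirroring the max_number_length guard in the scanner.
    func scanVersionNumber(s string) (int8, error) {
    	var value int8
    	length := 0
    	for length < len(s) && s[length] >= '0' && s[length] <= '9' {
    		length++
    		if length > 2 {
    			return 0, fmt.Errorf("found extremely long version number")
    		}
    		value = value*10 + int8(s[length-1]-'0')
    	}
    	if length == 0 {
    		return 0, fmt.Errorf("did not find expected version number")
    	}
    	return value, nil
    }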
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. 
- if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. 
- // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. 
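The escape loop in yaml_parser_scan_uri_escapes above consumes %XX octets until one complete UTF-8 sequence is assembled: the leading octet fixes the width, and every following octet must match the 10xxxxxx continuation form. A standalone sketch of the same validation (hypothetical decodeEscapedRune, with "fmt" and "strconv" imported):

    // decodeEscapedRune decodes leading %XX octets from s until one full
    // UTF-8 sequence is assembled, returning the bytes and the rest of s.
    func decodeEscapedRune(s string) ([]byte, string, error) {
    	var out []byte
    	want := 0
    	for {
    		if len(s) < 3 || s[0] != '%' {
    			return nil, s, fmt.Errorf("did not find URI escaped octet")
    		}
    		n, err := strconv.ParseUint(s[1:3], 16, 8)
    		if err != nil {
    			return nil, s, err
    		}
    		octet := byte(n)
    		switch {
    		case want > 0: // expecting a continuation octet
    			if octet&0xC0 != 0x80 {
    				return nil, s, fmt.Errorf("incorrect trailing UTF-8 octet")
    			}
    		case octet&0x80 == 0x00:
    			want = 1
    		case octet&0xE0 == 0xC0:
    			want = 2
    		case octet&0xF0 == 0xE0:
    			want = 3
    		case octet&0xF8 == 0xF0:
    			want = 4
    		default:
    			return nil, s, fmt.Errorf("incorrect leading UTF-8 octet")
    		}
    		out = append(out, octet)
    		s = s[3:]
    		if want--; want == 0 {
    			return out, s, nil
    		}
    	}
    }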
- if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. 
- s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' 
&&
-			parser.buffer[parser.buffer_pos+2] == '.')) &&
-			is_blankz(parser.buffer, parser.buffer_pos+3) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected document indicator")
-			return false
-		}
-
-		// Check for EOF.
-		if is_z(parser.buffer, parser.buffer_pos) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected end of stream")
-			return false
-		}
-
-		// Consume non-blank characters.
-		leading_blanks := false
-		for !is_blankz(parser.buffer, parser.buffer_pos) {
-			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
-				// It is an escaped single quote.
-				s = append(s, '\'')
-				skip(parser)
-				skip(parser)
-
-			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
-				// It is a right single quote.
-				break
-			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
-				// It is a right double quote.
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
-				// It is an escaped line break.
-				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-					return false
-				}
-				skip(parser)
-				skip_line(parser)
-				leading_blanks = true
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
-				// It is an escape sequence.
-				code_length := 0
-
-				// Check the escape character.
-				switch parser.buffer[parser.buffer_pos+1] {
-				case '0':
-					s = append(s, 0)
-				case 'a':
-					s = append(s, '\x07')
-				case 'b':
-					s = append(s, '\x08')
-				case 't', '\t':
-					s = append(s, '\x09')
-				case 'n':
-					s = append(s, '\x0A')
-				case 'v':
-					s = append(s, '\x0B')
-				case 'f':
-					s = append(s, '\x0C')
-				case 'r':
-					s = append(s, '\x0D')
-				case 'e':
-					s = append(s, '\x1B')
-				case ' ':
-					s = append(s, '\x20')
-				case '"':
-					s = append(s, '"')
-				case '\'':
-					s = append(s, '\'')
-				case '\\':
-					s = append(s, '\\')
-				case 'N': // NEL (#x85)
-					s = append(s, '\xC2')
-					s = append(s, '\x85')
-				case '_': // #xA0
-					s = append(s, '\xC2')
-					s = append(s, '\xA0')
-				case 'L': // LS (#x2028)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA8')
-				case 'P': // PS (#x2029)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA9')
-				case 'x':
-					code_length = 2
-				case 'u':
-					code_length = 4
-				case 'U':
-					code_length = 8
-				default:
-					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-						start_mark, "found unknown escape character")
-					return false
-				}
-
-				skip(parser)
-				skip(parser)
-
-				// Consume an arbitrary escape code.
-				if code_length > 0 {
-					var value int
-
-					// Scan the character value.
-					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
-						return false
-					}
-					for k := 0; k < code_length; k++ {
-						if !is_hex(parser.buffer, parser.buffer_pos+k) {
-							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-								start_mark, "did not find expected hexadecimal number")
-							return false
-						}
-						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
-					}
-
-					// Check the value and write the character.
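Most arms of the escape switch above are fixed byte substitutions; only 'x', 'u', and 'U' defer to the hexadecimal path that follows. Laid out as a table (a sketch of the mapping, not the vendored representation):

    // simpleEscapes maps the character after '\' to its replacement bytes;
    // NEL, NBSP, LS, and PS expand to multi-byte UTF-8 sequences.
    var simpleEscapes = map[byte][]byte{
    	'0': {0x00}, 'a': {0x07}, 'b': {0x08}, 't': {0x09},
    	'n': {0x0A}, 'v': {0x0B}, 'f': {0x0C}, 'r': {0x0D},
    	'e': {0x1B}, ' ': {0x20}, '"': {'"'}, '\'': {'\''}, '\\': {'\\'},
    	'N': {0xC2, 0x85},       // NEL (U+0085)
    	'_': {0xC2, 0xA0},       // NBSP (U+00A0)
    	'L': {0xE2, 0x80, 0xA8}, // LS (U+2028)
    	'P': {0xE2, 0x80, 0xA9}, // PS (U+2029)
    }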
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 4c45e660a8..0000000000 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde608cb..0000000000 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. 
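Editorial aside, separate from the patch itself: the quoted-scalar scanner deleted above decodes \x, \u, and \U escapes and then hand-encodes the resulting code point as UTF-8 instead of calling into the standard library. A minimal standalone sketch of that byte math, with unicode/utf8 used only to cross-check the result:

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 mirrors the branch structure of the deleted scanner code:
// one byte up to 0x7F, two up to 0x7FF, three up to 0xFFFF, four beyond.
// Surrogates and values above 0x10FFFF are rejected by the scanner before
// this point, so they are not handled here.
func encodeUTF8(value int) []byte {
	switch {
	case value <= 0x7F:
		return []byte{byte(value)}
	case value <= 0x7FF:
		return []byte{byte(0xC0 + (value >> 6)), byte(0x80 + (value & 0x3F))}
	case value <= 0xFFFF:
		return []byte{
			byte(0xE0 + (value >> 12)),
			byte(0x80 + ((value >> 6) & 0x3F)),
			byte(0x80 + (value & 0x3F)),
		}
	default:
		return []byte{
			byte(0xF0 + (value >> 18)),
			byte(0x80 + ((value >> 12) & 0x3F)),
			byte(0x80 + ((value >> 6) & 0x3F)),
			byte(0x80 + (value & 0x3F)),
		}
	}
}

func main() {
	// 0x2028 is the LS character the scanner emits for the \L escape.
	buf := make([]byte, 4)
	n := utf8.EncodeRune(buf, 0x2028)
	fmt.Printf("%x %x\n", encodeUTF8(0x2028), buf[:n]) // e280a8 e280a8
}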
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index 30813884c0..0000000000 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. 
-// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decoder reads and decodes YAML values from an input stream. -type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. 
-// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
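Editorial aside, separate from the patch itself: the Unmarshal and Marshal doc comments above fully specify the tag behavior. A small self-contained program exercising exactly the documented example (relevant only where a copy of gopkg.in/yaml.v2 is still on the module path, since this patch drops the vendored copy):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type T struct {
	F int `yaml:"a,omitempty"`
	B int
}

func main() {
	var t T
	// Per the tag, field F is read from key "a"; B defaults to key "b".
	if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", t) // {F:1 B:2}

	// omitempty drops the zero-valued F on the way out.
	out, err := yaml.Marshal(&T{B: 2})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // b: 2
}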
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
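Editorial aside, separate from the patch itself: getStructInfo above guards its per-type cache with an RWMutex in the classic probe-then-store shape. A condensed sketch of just that locking discipline, with the real field analysis stubbed out:

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = map[reflect.Type]string{}
)

// lookup follows getStructInfo's locking discipline: a cheap read-locked
// probe first, then on a miss the expensive computation followed by a
// write-locked store. Two goroutines may race to compute the same entry,
// but the results are identical, so the duplicated work is harmless.
func lookup(t reflect.Type) string {
	cacheMu.RLock()
	v, found := cache[t]
	cacheMu.RUnlock()
	if found {
		return v
	}
	v = t.String() // stand-in for the real per-field analysis
	cacheMu.Lock()
	cache[t] = v
	cacheMu.Unlock()
	return v
}

func main() {
	fmt.Println(lookup(reflect.TypeOf(struct{ A int }{})))
}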
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -// FutureLineWrap globally disables line wrapping when encoding long strings. -// This is a temporary and thus deprecated method introduced to faciliate -// migration towards v3, which offers more control of line lengths on -// individual encodings, and has a default matching the behavior introduced -// by this function. -// -// The default formatting of v2 was erroneously changed in v2.3.0 and reverted -// in v2.4.0, at which point this function was introduced to help migration. -func FutureLineWrap() { - disableLineWrapping = true -} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index f6a9c8e34b..0000000000 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. 
-} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
-) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. 
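Editorial aside, separate from the patch itself: eventStrings above pairs an iota enum with a slice literal keyed by the constants, plus a bounds-checked String method. The same pattern in miniature, with hypothetical names:

package main

import "fmt"

type color int

const (
	red color = iota
	green
	blue
)

// Index-keyed slice literal: each entry sits at its constant's position,
// so the lookup in String is a bounds check plus one index operation.
var colorStrings = []string{
	red:   "red",
	green: "green",
	blue:  "blue",
}

func (c color) String() string {
	if c < 0 || int(c) >= len(colorStrings) {
		return fmt.Sprintf("unknown color %d", int(c))
	}
	return colorStrings[c]
}

func main() {
	fmt.Println(green, color(7)) // green unknown color 7
}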
-type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. 
- multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c37..0000000000 --- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. 
-func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. 
- if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 691818ef9e..f83ba988e3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.12.0 +# github.com/Microsoft/hcsshim v0.12.3 ## explicit; go 1.21 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -107,8 +107,8 @@ github.com/chzyer/readline # github.com/containerd/cgroups/v3 v3.0.3 ## explicit; go 1.18 github.com/containerd/cgroups/v3/cgroup1/stats -# github.com/containerd/containerd v1.7.13 -## explicit; go 1.19 +# github.com/containerd/containerd v1.7.16 +## explicit; go 1.21 github.com/containerd/containerd/errdefs github.com/containerd/containerd/log github.com/containerd/containerd/pkg/userns @@ -141,7 +141,7 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.4.1 ## explicit; go 1.20 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.35.1-0.20240425012436-fc18157da3a1 +# github.com/containers/buildah v1.35.1-0.20240507152307-453d2fc1099f ## explicit; go 1.20 github.com/containers/buildah github.com/containers/buildah/bind @@ -171,8 +171,8 @@ github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/pkg/util github.com/containers/buildah/pkg/volumes github.com/containers/buildah/util -# github.com/containers/common v0.58.1-0.20240419143618-deb3eeef3b74 -## explicit; go 1.20 +# github.com/containers/common v0.58.1-0.20240508094622-694258d1f523 +## explicit; go 1.21 github.com/containers/common/internal github.com/containers/common/internal/attributedstring github.com/containers/common/libimage @@ -220,6 +220,7 @@ github.com/containers/common/pkg/retry github.com/containers/common/pkg/rootlessport github.com/containers/common/pkg/seccomp github.com/containers/common/pkg/secrets +github.com/containers/common/pkg/secrets/define github.com/containers/common/pkg/secrets/filedriver github.com/containers/common/pkg/secrets/passdriver github.com/containers/common/pkg/secrets/shelldriver @@ -243,8 +244,8 @@ github.com/containers/conmon/runner/config # github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363 ## explicit; go 1.20 github.com/containers/gvisor-tap-vsock/pkg/types -# github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f -## explicit; go 1.20 +# github.com/containers/image/v5 v5.30.1-0.20240506211741-f0acff0fe152 +## explicit; go 1.21 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -324,7 +325,7 @@ github.com/containers/libhvee/pkg/wmiext # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 ## explicit github.com/containers/libtrust -# github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84 +# github.com/containers/luksy v0.0.0-20240408185936-afd8e7619947 ## explicit; go 1.20 github.com/containers/luksy # github.com/containers/ocicrypt v1.1.10 @@ -354,14 +355,13 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage 
v1.53.1-0.20240411065836-1fd0dc1d20e5 -## explicit; go 1.20 +# github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d +## explicit; go 1.21 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs github.com/containers/storage/drivers/btrfs github.com/containers/storage/drivers/copy -github.com/containers/storage/drivers/devmapper github.com/containers/storage/drivers/overlay github.com/containers/storage/drivers/overlayutils github.com/containers/storage/drivers/quota @@ -377,9 +377,7 @@ github.com/containers/storage/pkg/chunked/dump github.com/containers/storage/pkg/chunked/internal github.com/containers/storage/pkg/chunked/toc github.com/containers/storage/pkg/config -github.com/containers/storage/pkg/devicemapper github.com/containers/storage/pkg/directory -github.com/containers/storage/pkg/dmesg github.com/containers/storage/pkg/fileutils github.com/containers/storage/pkg/fsutils github.com/containers/storage/pkg/fsverity @@ -409,8 +407,8 @@ github.com/containers/storage/types ## explicit; go 1.19 github.com/containers/winquit/pkg/winquit github.com/containers/winquit/pkg/winquit/win32 -# github.com/coreos/go-oidc/v3 v3.9.0 -## explicit; go 1.19 +# github.com/coreos/go-oidc/v3 v3.10.0 +## explicit; go 1.21 github.com/coreos/go-oidc/v3/oidc # github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f ## explicit @@ -555,6 +553,11 @@ github.com/gin-gonic/gin/render github.com/go-jose/go-jose/v3 github.com/go-jose/go-jose/v3/cipher github.com/go-jose/go-jose/v3/json +# github.com/go-jose/go-jose/v4 v4.0.1 +## explicit; go 1.21 +github.com/go-jose/go-jose/v4 +github.com/go-jose/go-jose/v4/cipher +github.com/go-jose/go-jose/v4/json # github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr @@ -566,8 +569,8 @@ github.com/go-logr/stdr ## explicit; go 1.12 github.com/go-ole/go-ole github.com/go-ole/go-ole/oleutil -# github.com/go-openapi/analysis v0.21.4 -## explicit; go 1.13 +# github.com/go-openapi/analysis v0.23.0 +## explicit; go 1.20 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal/debug github.com/go-openapi/analysis/internal/flatten/normalize @@ -578,18 +581,18 @@ github.com/go-openapi/analysis/internal/flatten/sortref # github.com/go-openapi/errors v0.22.0 ## explicit; go 1.20 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.19.6 -## explicit; go 1.13 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.2 -## explicit; go 1.13 +# github.com/go-openapi/jsonreference v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/loads v0.21.2 -## explicit; go 1.13 +# github.com/go-openapi/loads v0.22.0 +## explicit; go 1.20 github.com/go-openapi/loads -# github.com/go-openapi/runtime v0.26.0 -## explicit; go 1.18 +# github.com/go-openapi/runtime v0.28.0 +## explicit; go 1.20 github.com/go-openapi/runtime github.com/go-openapi/runtime/client github.com/go-openapi/runtime/logger @@ -599,8 +602,8 @@ github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security github.com/go-openapi/runtime/yamlpc -# github.com/go-openapi/spec v0.20.9 -## explicit; go 1.13 +# github.com/go-openapi/spec v0.21.0 +## explicit; go 1.20 github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.23.0 ## explicit; go 1.20 @@ -608,8 +611,8 @@ 
 # github.com/go-openapi/swag v0.23.0
 ## explicit; go 1.20
 github.com/go-openapi/swag
-# github.com/go-openapi/validate v0.22.1
-## explicit; go 1.14
+# github.com/go-openapi/validate v0.24.0
+## explicit; go 1.20
 github.com/go-openapi/validate
 # github.com/go-playground/locales v0.14.1
 ## explicit; go 1.17
@@ -654,7 +657,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.19.0
+# github.com/google/go-containerregistry v0.19.1
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/name
 github.com/google/go-containerregistry/pkg/v1
@@ -837,7 +840,7 @@ github.com/nxadm/tail/winfile
 # github.com/oklog/ulid v1.3.1
 ## explicit
 github.com/oklog/ulid
-# github.com/onsi/ginkgo/v2 v2.17.2
+# github.com/onsi/ginkgo/v2 v2.17.3
 ## explicit; go 1.20
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
@@ -984,12 +987,12 @@ github.com/shirou/gopsutil/v3/process
 # github.com/shoenig/go-m1cpu v0.1.6
 ## explicit; go 1.20
 github.com/shoenig/go-m1cpu
-# github.com/sigstore/fulcio v1.4.3
-## explicit; go 1.20
+# github.com/sigstore/fulcio v1.4.5
+## explicit; go 1.21
 github.com/sigstore/fulcio/pkg/api
 github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.2.2
-## explicit; go 1.19
+# github.com/sigstore/rekor v1.3.6
+## explicit; go 1.21
 github.com/sigstore/rekor/pkg/client
 github.com/sigstore/rekor/pkg/generated/client
 github.com/sigstore/rekor/pkg/generated/client/entries
@@ -1026,8 +1029,8 @@ github.com/stefanberger/go-pkcs11uri
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.15.2
-## explicit; go 1.20
+# github.com/sylabs/sif/v2 v2.16.0
+## explicit; go 1.21
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit
@@ -1118,11 +1121,11 @@ go.opencensus.io/internal
 go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
-## explicit; go 1.19
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
+## explicit; go 1.20
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.22.0
+# go.opentelemetry.io/otel v1.24.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -1133,16 +1136,15 @@ go.opentelemetry.io/otel/internal/attribute
 go.opentelemetry.io/otel/internal/baggage
 go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
-go.opentelemetry.io/otel/semconv/internal
-go.opentelemetry.io/otel/semconv/v1.12.0
+go.opentelemetry.io/otel/semconv/internal/v2
 go.opentelemetry.io/otel/semconv/v1.17.0
-# go.opentelemetry.io/otel/metric v1.22.0
+go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
+go.opentelemetry.io/otel/semconv/v1.20.0
+# go.opentelemetry.io/otel/metric v1.24.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/sdk v1.21.0
-## explicit; go 1.20
-# go.opentelemetry.io/otel/trace v1.22.0
+# go.opentelemetry.io/otel/trace v1.24.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
@@ -1205,7 +1207,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.19.0
+# golang.org/x/oauth2 v0.20.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
@@ -1253,10 +1255,10 @@ golang.org/x/time/rate
 ## explicit; go 1.19
 golang.org/x/tools/cover
 golang.org/x/tools/go/ast/inspector
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.62.0
+# google.golang.org/grpc v1.62.1
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -1361,9 +1363,6 @@ gopkg.in/natefinch/lumberjack.v2
 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
 ## explicit
 gopkg.in/tomb.v1
-# gopkg.in/yaml.v2 v2.4.0
-## explicit; go 1.15
-gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3

From 521bbab86467b8c01ed90c72602a3b7e9e55f0d2 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Wed, 8 May 2024 14:58:52 +0200
Subject: [PATCH 2/2] test/system: fix broken "podman volume globs" test

This never tested what it said it did: the command line was wrong, so
`,ro=false` was taken as the image name, causing an error. What this
test actually should verify is that a glob is taken as-is and not
evaluated.

Signed-off-by: Paul Holzinger
---
 test/system/060-mount.bats | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/system/060-mount.bats b/test/system/060-mount.bats
index 070dba5eb9..4cedd3b4aa 100644
--- a/test/system/060-mount.bats
+++ b/test/system/060-mount.bats
@@ -289,11 +289,11 @@ EOF
     is "$output" ".*$vol1a" "podman images --inspect should include $vol1a"
     is "$output" ".*$vol1b" "podman images --inspect should include $vol1b"

-    run_podman 125 run --rm --mount source=${PODMAN_TMPDIR}/v2\*,type=bind,ro=false $IMAGE touch $vol2
+    run_podman 125 run --rm --mount type=bind,source=${PODMAN_TMPDIR}/v2\*,ro=false $IMAGE touch $vol2
     is "$output" "Error: must set volume destination" "Bind mounts require destination"

-    run_podman 125 run --rm --mount source=${PODMAN_TMPDIR}/v2\*,destination=/tmp/foobar, ro=false $IMAGE touch $vol2
-    is "$output" "Error: invalid reference format" "Default mounts don not support globs"
+    run_podman 125 run --rm --mount type=bind,source=${PODMAN_TMPDIR}/v2\*,destination=/tmp/foobar,ro=false $IMAGE touch $vol2
+    is "$output" "Error: statfs ${PODMAN_TMPDIR}/v2*: no such file or directory" "Bind mount should not interpret glob and must use as is"

     mkdir $PODMAN_TMPDIR/foo1 $PODMAN_TMPDIR/foo2 $PODMAN_TMPDIR/foo3
     touch $PODMAN_TMPDIR/foo1/bar $PODMAN_TMPDIR/foo2/bar $PODMAN_TMPDIR/foo3/bar