From 49020d9e6cefd345586059980b4c1e87668d35de Mon Sep 17 00:00:00 2001
From: Mirko Brombin
Date: Sun, 28 Apr 2024 20:02:11 +0200
Subject: [PATCH] feat: bump prometheus to v1.0.0

---
 go.mod | 111 +-
 go.sum | 383 +--
 .../Microsoft/hcsshim/.golangci.yml | 7 +
 vendor/github.com/Microsoft/hcsshim/README.md | 84 +-
 .../hcsshim/computestorage/attach.go | 28 +
 .../hcsshim/computestorage/detach.go | 26 +
 .../hcsshim/computestorage/storage.go | 7 +-
 .../computestorage/zsyscall_windows.go | 60 +
 .../github.com/Microsoft/hcsshim/container.go | 2 +-
 vendor/github.com/Microsoft/hcsshim/errors.go | 11 +-
 .../Microsoft/hcsshim/internal/hcs/process.go | 2 +-
 .../hcsshim/internal/hcs/schema2/layer.go | 7 +
 .../internal/hcs/schema2/registry_hive.go | 13 +
 .../internal/hcs/schema2/registry_key.go | 2 +-
 .../internal/hcs/schema2/registry_value.go | 2 +-
 .../hcs/schema2/registry_value_type.go | 17 +
 .../Microsoft/hcsshim/internal/hcs/system.go | 8 +-
 .../hcsshim/internal/hns/hnsfuncs.go | 2 +-
 .../hcsshim/internal/hns/namespace.go | 4 +-
 .../hcsshim/internal/jobobject/iocp.go | 3 +-
 .../hcsshim/internal/jobobject/jobobject.go | 2 +-
 .../hcsshim/internal/jobobject/limits.go | 7 +
 .../Microsoft/hcsshim/internal/log/format.go | 1 +
 .../hcsshim/internal/logfields/fields.go | 1 +
 .../Microsoft/hcsshim/internal/memory/pool.go | 6 +-
 .../hcsshim/internal/safefile/safeopen.go | 2 +-
 .../hcsshim/internal/vmcompute/vmcompute.go | 16 +-
 .../internal/wclayer/baselayerreader.go | 6 +-
 .../internal/wclayer/converttobaselayer.go | 2 +
 .../internal/wclayer/expandscratchsize.go | 18 +-
 .../hcsshim/internal/wclayer/legacy.go | 8 +-
 .../hcsshim/internal/winapi/cimfs.go | 4 +-
 .../internal/winapi/zsyscall_windows.go | 14 +-
 vendor/github.com/aead/serpent/.gitignore | 25 +
 vendor/github.com/aead/serpent/LICENSE | 21 +
 vendor/github.com/aead/serpent/README.md | 9 +
 vendor/github.com/aead/serpent/sbox_ref.go | 316 +++
 vendor/github.com/aead/serpent/serpent.go | 119 +
 vendor/github.com/aead/serpent/serpent_ref.go | 276 ++
 .../containerd/typeurl/v2/.gitignore | 2 +
 .../github.com/containerd/typeurl/v2/LICENSE | 191 ++
 .../containerd/typeurl/v2/README.md | 20 +
 .../github.com/containerd/typeurl/v2/doc.go | 83 +
 .../github.com/containerd/typeurl/v2/types.go | 269 ++
 .../github.com/containers/buildah/.cirrus.yml | 70 +-
 .../github.com/containers/buildah/.gitignore | 2 +
 .../github.com/containers/buildah/.packit.sh | 28 -
 .../containers/buildah/.packit.yaml | 60 +-
 .../containers/buildah/CHANGELOG.md | 286 ++
 vendor/github.com/containers/buildah/Makefile | 33 +-
 vendor/github.com/containers/buildah/add.go | 40 +-
 .../github.com/containers/buildah/buildah.go | 22 +-
 .../containers/buildah/buildah.spec.rpkg | 165 --
 .../containers/buildah/changelog.txt | 280 ++
 .../containers/buildah/chroot/run_common.go | 4 +
 .../containers/buildah/chroot/run_linux.go | 153 +-
 .../containers/buildah/chroot/seccomp.go | 8 +-
 .../buildah/chroot/seccomp_freebsd.go | 2 +
 .../buildah/chroot/seccomp_unsupported.go | 2 +
 .../github.com/containers/buildah/commit.go | 32 +-
 .../containers/buildah/convertcw.go | 217 ++
 .../containers/buildah/copier/copier.go | 7 +-
 .../hardlink_not_uint64.go} | 3 +-
 .../hardlink_uint64.go} | 2 +-
 .../buildah/copier/hardlink_unix.go | 32 +
 .../buildah/copier/hardlink_windows.go | 17 +
 .../containers/buildah/define/build.go | 11 +
 .../containers/buildah/define/types.go | 35 +-
 vendor/github.com/containers/buildah/image.go | 112 +-
 .../containers/buildah/imagebuildah/build.go | 11 +-
 .../buildah/imagebuildah/executor.go | 141 +-
 .../buildah/imagebuildah/stage_executor.go | 323 ++-
 .../github.com/containers/buildah/import.go | 1 +
 .../github.com/containers/buildah/install.md | 46 +-
 .../buildah/internal/config/convert.go | 121 +
 .../buildah/internal/config/executor.go | 45 +
 .../buildah/internal/config/override.go | 181 ++
 .../buildah/internal/mkcw/archive.go | 464 +++
 .../buildah/internal/mkcw/attest.go | 250 ++
 .../buildah/internal/mkcw/embed/entrypoint.gz | Bin 0 -> 405 bytes
 .../buildah/internal/mkcw/entrypoint.go | 6 +
 .../containers/buildah/internal/mkcw/luks.go | 51 +
 .../buildah/internal/mkcw/makefs.go | 38 +
 .../buildah/internal/mkcw/types/attest.go | 47 +
 .../buildah/internal/mkcw/types/workload.go | 34 +
 .../buildah/internal/mkcw/workload.go | 223 ++
 .../buildah/internal/parse/parse.go | 608 ----
 .../buildah/internal/tmpdir/tmpdir.go | 26 +
 .../containers/buildah/internal/util/util.go | 59 +-
 .../buildah/internal/volumes/volumes.go | 642 +++++
 .../containers/buildah/libdm_tag.sh | 15 -
 vendor/github.com/containers/buildah/new.go | 10 +-
 .../buildah/pkg/chrootuser/user_unix.go | 20 +-
 .../containers/buildah/pkg/overlay/overlay.go | 69 -
 .../buildah/pkg/overlay/overlay_freebsd.go | 31 +
 .../buildah/pkg/overlay/overlay_linux.go | 80 +
 .../containers/buildah/pkg/parse/parse.go | 126 +-
 .../buildah/pkg/sshagent/sshagent.go | 10 +-
 .../buildah/pkg/util/resource_unix.go | 38 +
 .../buildah/pkg/util/resource_windows.go | 16 +
 .../containers/buildah/pkg/util/util.go | 7 +-
 vendor/github.com/containers/buildah/push.go | 5 +
 vendor/github.com/containers/buildah/run.go | 9 +-
 .../containers/buildah/run_common.go | 187 +-
 .../containers/buildah/run_freebsd.go | 88 +-
 .../containers/buildah/run_linux.go | 318 +--
 .../containers/buildah/selinux_tag.sh | 4 -
 .../containers/buildah/util/types.go | 12 +-
 .../containers/buildah/util/util_unix.go | 23 -
 .../containers/buildah/util/util_windows.go | 10 +-
 .../common/internal/attributedstring/slice.go | 102 +
 .../containers/common/libimage/copier.go | 14 +-
 .../common/libimage/define/platform.go | 11 +
 .../containers/common/libimage/disk_usage.go | 3 +
 .../containers/common/libimage/events.go | 3 +
 .../containers/common/libimage/filters.go | 15 +-
 .../containers/common/libimage/history.go | 54 +-
 .../containers/common/libimage/image.go | 78 +-
 .../common/libimage/image_config.go | 3 +
 .../containers/common/libimage/image_tree.go | 3 +
 .../containers/common/libimage/import.go | 3 +
 .../containers/common/libimage/inspect.go | 3 +
 .../containers/common/libimage/layer_tree.go | 25 +
 .../containers/common/libimage/load.go | 3 +
 .../common/libimage/manifest_list.go | 5 +
 .../common/libimage/manifests/manifests.go | 45 +-
 .../containers/common/libimage/normalize.go | 3 +
 .../containers/common/libimage/oci.go | 3 +
 .../containers/common/libimage/platform.go | 99 +-
 .../common/libimage/platform/platform.go | 57 +
 .../containers/common/libimage/pull.go | 3 +
 .../containers/common/libimage/push.go | 3 +
 .../containers/common/libimage/runtime.go | 13 +-
 .../containers/common/libimage/save.go | 3 +
 .../containers/common/libimage/search.go | 3 +
 .../common/libnetwork/cni/network.go | 16 +-
 .../common/libnetwork/netavark/config.go | 5 +-
 .../common/libnetwork/netavark/network.go | 14 +-
 .../common/libnetwork/network/interface.go | 4 +-
 .../common/libnetwork/pasta/pasta.go | 140 +
 .../common/libnetwork/slirp4netns/const.go | 12 +
 .../libnetwork/slirp4netns/slirp4netns.go | 752 +++++
 .../common/libnetwork/types/const.go | 1 +
 .../common/libnetwork/util/filters.go | 4 +-
 .../common/pkg/apparmor/apparmor_linux.go | 5 +
 .../containers/common/pkg/auth/auth.go | 402 +++
 .../containers/common/pkg/auth/cli.go | 87 +
 .../common/pkg/completion/completion.go | 155 ++
 .../containers/common/pkg/config/config.go | 186 +-
 .../common/pkg/config/config_darwin.go | 4 +-
 .../common/pkg/config/config_local.go | 6 +-
 .../common/pkg/config/containers.conf | 29 +-
 .../common/pkg/config/containers.conf-freebsd | 29 +-
 .../common/pkg/config/db_backend.go | 17 +-
 .../containers/common/pkg/config/default.go | 162 +-
 .../containers/common/pkg/filters/filters.go | 55 +-
 .../common/pkg/password/password_supported.go | 57 +
 .../common/pkg/password/password_windows.go | 21 +
 .../pkg/rootlessport/rootlessport_linux.go | 27 +
 .../common/pkg/servicereaper/service.go | 65 +
 .../common/pkg/subscriptions/subscriptions.go | 29 +
 .../containers/common/pkg/util/copy.go | 57 -
 .../containers/common/pkg/util/util.go | 122 -
 .../common/pkg/util/util_supported.go | 44 -
 .../common/pkg/util/util_windows.go | 15 -
 .../containers/common/pkg/version/version.go | 105 +
 .../containers/common/version/version.go | 2 +-
 .../containers/image/v5/copy/compression.go | 17 +-
 .../containers/image/v5/copy/encryption.go | 2 +-
 .../containers/image/v5/copy/multiple.go | 2 +-
 .../containers/image/v5/copy/single.go | 11 +-
 .../image/v5/docker/daemon/client.go | 7 +-
 .../image/v5/docker/daemon/daemon_dest.go | 33 +-
 .../image/v5/docker/distribution_error.go | 3 +-
 .../image/v5/docker/docker_client.go | 5 +
 .../image/v5/docker/docker_image.go | 3 +
 .../image/v5/docker/docker_image_dest.go | 75 +-
 .../image/v5/docker/docker_image_src.go | 15 +-
 .../image/v5/docker/docker_transport.go | 63 +-
 .../containers/image/v5/docker/errors.go | 2 +-
 .../containers/image/v5/image/unparsed.go | 22 +
 .../image/v5/internal/blobinfocache/types.go | 9 +-
 .../containers/image/v5/internal/image/oci.go | 57 +-
 .../internal/imagedestination/impl/helpers.go | 5 +
 .../internal/manifest/docker_schema2_list.go | 4 +-
 .../image/v5/internal/manifest/oci_index.go | 6 +-
 .../image/v5/oci/archive/oci_src.go | 12 +
 .../image/v5/oci/archive/oci_transport.go | 19 +-
 .../image/v5/oci/layout/oci_delete.go | 240 ++
 .../image/v5/oci/layout/oci_dest.go | 15 +-
 .../containers/image/v5/oci/layout/oci_src.go | 2 +-
 .../image/v5/oci/layout/oci_transport.go | 50 +-
 .../internal/prioritize/prioritize.go | 56 +-
 .../v5/pkg/blobinfocache/memory/memory.go | 54 +-
 .../v5/pkg/blobinfocache/sqlite/sqlite.go | 48 +-
 .../image/v5/pkg/docker/config/config.go | 281 +-
 .../image/v5/pkg/shortnames/shortnames.go | 3 +-
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 2 +-
 .../image/v5/storage/storage_reference.go | 31 +
 .../image/v5/storage/storage_transport.go | 33 +
 .../containers/image/v5/types/types.go | 8 +-
 .../containers/image/v5/version/version.go | 4 +-
 .../github.com/containers/luksy/.cirrus.yml | 32 +
 .../github.com/containers/luksy/.dockerignore | 2 +
 vendor/github.com/containers/luksy/.gitignore | 21 +
 vendor/github.com/containers/luksy/Dockerfile | 7 +
 .../luksy}/LICENSE | 0
 vendor/github.com/containers/luksy/Makefile | 14 +
 vendor/github.com/containers/luksy/OWNERS | 4 +
 vendor/github.com/containers/luksy/README.md | 10 +
 vendor/github.com/containers/luksy/decrypt.go | 255 ++
 vendor/github.com/containers/luksy/encrypt.go | 421 +++
 .../github.com/containers/luksy/encryption.go | 572 ++++
 vendor/github.com/containers/luksy/luks.go | 75 +
 vendor/github.com/containers/luksy/tune.go | 55 +
 .../github.com/containers/luksy/v1header.go | 321 +++
 .../github.com/containers/luksy/v2header.go | 203 ++
 vendor/github.com/containers/luksy/v2json.go | 157 ++
 .../ocicrypt/config/pkcs11config/config.go | 123 -
 .../ocicrypt/helpers/parse_helpers.go | 377 ---
 .../github.com/containers/storage/.cirrus.yml | 6 +-
 vendor/github.com/containers/storage/VERSION | 2 +-
 .../containers/storage/drivers/driver.go | 3 +
 .../storage/drivers/overlay/check.go | 33 +
 .../drivers/overlay/composefs_notsupported.go | 6 +-
 .../drivers/overlay/composefs_supported.go | 54 +-
 .../storage/drivers/overlay/mount.go | 21 +-
 .../storage/drivers/overlay/overlay.go | 95 +-
 .../github.com/containers/storage/layers.go | 20 +-
 .../containers/storage/pkg/archive/archive.go | 19 +-
 .../storage/pkg/chunked/cache_linux.go | 5 +
 .../storage/pkg/chunked/dump/dump.go | 230 ++
 .../storage/pkg/chunked/storage_linux.go | 22 +-
 .../storage/pkg/loopback/attach_loopback.go | 18 +-
 .../containers/storage/pkg/system/rm.go | 6 +-
 .../storage/pkg/unshare/unshare_darwin.go | 5 +
 .../storage/pkg/unshare/unshare_linux.go | 10 +
 .../pkg/unshare/unshare_unsupported.go | 5 +
 .../containers/storage/storage.conf | 5 +-
 vendor/github.com/containers/storage/store.go | 25 +-
 .../containers/storage/types/options.go | 5 +-
 .../github.com/docker/docker/api/swagger.yaml | 4 +-
 .../docker/docker/client/container_wait.go | 8 +-
 .../docker/docker/pkg/idtools/idtools_unix.go | 14 +-
 .../docker/docker/pkg/ioutils/readers.go | 21 +
 .../docker/docker/pkg/ioutils/writers.go | 10 +-
 .../docker/go-connections/nat/nat.go | 14 +-
 .../docker/go-connections/nat/parse.go | 28 +-
 .../docker/go-connections/sockets/sockets.go | 19 +-
 .../go-connections/sockets/sockets_unix.go | 9 +-
 .../go-connections/sockets/unix_socket.go | 10 +-
 .../{certpool_go17.go => certpool.go} | 2 -
 .../tlsconfig/certpool_other.go | 13 -
 .../docker/go-connections/tlsconfig/config.go | 58 +-
 .../tlsconfig/config_client_ciphers.go | 3 -
 .../tlsconfig/config_legacy_client_ciphers.go | 15 -
 .../tlsconfig/versions_go113.go | 16 -
 .../tlsconfig/versions_other.go | 15 -
 .../go-jose/go-jose/v3/BUG-BOUNTY.md | 10 -
 .../go-jose/go-jose/v3/CHANGELOG.md | 72 +-
 .../github.com/go-jose/go-jose/v3/README.md | 60 +-
 .../github.com/go-jose/go-jose/v3/SECURITY.md | 13 +
 .../go-jose/go-jose/v3/asymmetric.go | 3 +
 .../github.com/go-jose/go-jose/v3/crypter.go | 99 +-
 vendor/github.com/go-jose/go-jose/v3/doc.go | 2 -
 .../github.com/go-jose/go-jose/v3/encoding.go | 54 +-
 .../go-jose/go-jose/v3/json/decode.go | 3 +-
 .../go-jose/go-jose/v3/json/encode.go | 28 +-
 .../go-jose/go-jose/v3/json/stream.go | 1 -
 vendor/github.com/go-jose/go-jose/v3/jwe.go | 14 +-
 vendor/github.com/go-jose/go-jose/v3/jwk.go | 18 +-
 vendor/github.com/go-jose/go-jose/v3/jws.go | 13 +-
 .../github.com/go-jose/go-jose/v3/opaque.go | 2 +-
 .../github.com/go-jose/go-jose/v3/shared.go | 9 +-
 .../github.com/go-jose/go-jose/v3/signing.go | 59 +-
 .../go-jose/go-jose/v3/symmetric.go | 15 +-
 .../go-openapi/analysis/.golangci.yml | 53 +-
 .../github.com/go-openapi/analysis/README.md | 10 +-
 .../go-openapi/analysis/appveyor.yml | 32 -
 vendor/github.com/go-openapi/analysis/doc.go | 10 +-
 .../github.com/go-openapi/analysis/flatten.go | 64 +-
 .../go-openapi/analysis/flatten_name.go | 39 +-
 .../go-openapi/analysis/flatten_options.go | 1 +
 .../internal/flatten/replace/replace.go | 42 +-
 .../analysis/internal/flatten/sortref/keys.go | 2 +-
 .../github.com/go-openapi/analysis/mixin.go | 2 +-
 .../github.com/go-openapi/analysis/schema.go | 8 +-
 .../go-openapi/errors/.golangci.yml | 52 +-
 vendor/github.com/go-openapi/errors/README.md | 5 +-
 vendor/github.com/go-openapi/errors/api.go | 18 +-
 vendor/github.com/go-openapi/errors/schema.go | 6 +-
 .../go-openapi/jsonpointer/.golangci.yml | 61 +
 .../go-openapi/jsonpointer/README.md | 8 +-
 .../go-openapi/jsonpointer/pointer.go | 68 +-
 .../go-openapi/jsonreference/.golangci.yml | 57 +-
 .../go-openapi/jsonreference/README.md | 14 +-
 .../github.com/go-openapi/loads/.golangci.yml | 49 +-
 vendor/github.com/go-openapi/loads/README.md | 2 +-
 vendor/github.com/go-openapi/loads/TODO.md | 3 +
 vendor/github.com/go-openapi/loads/doc.go | 9 +-
 vendor/github.com/go-openapi/loads/loaders.go | 7 +-
 vendor/github.com/go-openapi/loads/spec.go | 35 +-
 .../go-openapi/runtime/.golangci.yml | 56 +-
 .../github.com/go-openapi/runtime/README.md | 11 +-
 .../go-openapi/runtime/bytestream.go | 18 +-
 .../go-openapi/runtime/client_operation.go | 4 +-
 .../go-openapi/runtime/client_request.go | 4 +-
 .../github.com/go-openapi/runtime/request.go | 14 +-
 vendor/github.com/go-openapi/spec/.gitignore | 3 +-
 .../github.com/go-openapi/spec/.golangci.yml | 21 +-
 vendor/github.com/go-openapi/spec/README.md | 5 +-
 .../github.com/go-openapi/spec/appveyor.yml | 32 -
 vendor/github.com/go-openapi/spec/bindata.go | 297 --
 vendor/github.com/go-openapi/spec/embed.go | 17 +
 vendor/github.com/go-openapi/spec/expander.go | 17 +-
 .../go-openapi/spec/normalizer_nonwindows.go | 2 +-
 .../github.com/go-openapi/spec/operation.go | 5 +-
 .../github.com/go-openapi/spec/parameter.go | 42 +-
 .../go-openapi/spec/schema_loader.go | 9 +-
 .../spec/schemas/jsonschema-draft-04.json | 149 +
 .../go-openapi/spec/schemas/v2/schema.json | 1607 +++++++++++
 vendor/github.com/go-openapi/spec/spec.go | 6 +-
 vendor/github.com/go-openapi/spec/swagger.go | 4 +-
 vendor/github.com/go-openapi/spec/url_go18.go | 8 -
 vendor/github.com/go-openapi/spec/url_go19.go | 3 -
 .../go-openapi/strfmt/.golangci.yml | 92 +-
 vendor/github.com/go-openapi/strfmt/README.md | 5 +-
 vendor/github.com/go-openapi/strfmt/bson.go | 6 +-
 .../github.com/go-openapi/strfmt/default.go | 52 +-
 vendor/github.com/go-openapi/strfmt/format.go | 2 +-
 vendor/github.com/go-openapi/strfmt/time.go | 8 +-
 vendor/github.com/go-openapi/swag/.gitignore | 1 +
 .../github.com/go-openapi/swag/.golangci.yml | 54 +-
 vendor/github.com/go-openapi/swag/README.md | 8 +-
 .../{post_go19.go => initialism_index.go} | 3 -
 vendor/github.com/go-openapi/swag/loading.go | 105 +-
 .../github.com/go-openapi/swag/post_go18.go | 24 -
 vendor/github.com/go-openapi/swag/pre_go18.go | 24 -
 vendor/github.com/go-openapi/swag/pre_go19.go | 70 -
 vendor/github.com/go-openapi/swag/util.go | 12 +-
 vendor/github.com/go-openapi/swag/yaml.go | 36 +-
 .../go-openapi/validate/.golangci.yml | 53 +-
 .../github.com/go-openapi/validate/README.md | 8 +-
 .../go-openapi/validate/appveyor.yml | 32 -
 .../go-openapi/validate/default_validator.go | 3 +-
 vendor/github.com/go-openapi/validate/doc.go | 70 +-
 .../go-openapi/validate/example_validator.go | 5 +-
 .../github.com/go-openapi/validate/helpers.go | 8 +-
 .../github.com/go-openapi/validate/options.go | 20 +-
 .../github.com/go-openapi/validate/result.go | 8 +-
 .../github.com/go-openapi/validate/schema.go | 2 +-
 vendor/github.com/go-openapi/validate/spec.go | 70 +-
 .../go-openapi/validate/spec_messages.go | 7 +-
 vendor/github.com/go-openapi/validate/type.go | 2 +-
 .../go-openapi/validate/validator.go | 4 +-
 .../github.com/go-openapi/validate/values.go | 12 +-
 vendor/github.com/google/uuid/CHANGELOG.md | 7 +
 vendor/github.com/google/uuid/time.go | 21 +-
 vendor/github.com/google/uuid/uuid.go | 53 +
 vendor/github.com/google/uuid/version6.go | 56 +
 vendor/github.com/google/uuid/version7.go | 75 +
 vendor/github.com/gorilla/mux/.editorconfig | 20 +
 vendor/github.com/gorilla/mux/.gitignore | 1 +
 vendor/github.com/gorilla/mux/AUTHORS | 8 -
 vendor/github.com/gorilla/mux/LICENSE | 2 +-
 vendor/github.com/gorilla/mux/Makefile | 34 +
 vendor/github.com/gorilla/mux/README.md | 61 +-
 vendor/github.com/gorilla/mux/doc.go | 25 +-
 vendor/github.com/gorilla/mux/mux.go | 16 +-
 vendor/github.com/gorilla/mux/regexp.go | 10 +-
 vendor/github.com/gorilla/mux/route.go | 109 +-
 .../github.com/klauspost/compress/README.md | 8 +
 .../klauspost/compress/fse/compress.go | 2 +-
 .../klauspost/compress/huff0/bytereader.go | 44 -
 .../klauspost/compress/huff0/compress.go | 5 +-
 .../klauspost/compress/huff0/huff0.go | 4 +-
 .../klauspost/compress/zstd/README.md | 2 +-
 .../klauspost/compress/zstd/enc_best.go | 44 +-
 .../klauspost/compress/zstd/enc_better.go | 17 +-
 .../letsencrypt/boulder/core/interfaces.go | 2 +-
 .../letsencrypt/boulder/core/util.go | 75 +
 vendor/github.com/mattn/go-sqlite3/sqlite3.go | 13 +-
 vendor/github.com/moby/buildkit/AUTHORS | 66 +
 vendor/github.com/moby/buildkit/LICENSE | 201 ++
 .../frontend/dockerfile/command/command.go | 46 +
 .../frontend/dockerfile/parser/directives.go | 171 ++
 .../frontend/dockerfile/parser/errors.go | 58 +
 .../dockerfile/parser/line_parsers.go | 367 +++
 .../frontend/dockerfile/parser/parser.go | 552 ++++
 .../dockerfile/parser/split_command.go | 117 +
 .../frontend/dockerfile/shell/envVarTest | 238 ++
 .../dockerfile/shell/equal_env_unix.go | 11 +
 .../dockerfile/shell/equal_env_windows.go | 10 +
 .../buildkit/frontend/dockerfile/shell/lex.go | 474 ++++
 .../frontend/dockerfile/shell/wordsTest | 30 +
 .../moby/buildkit/util/stack/generate.go | 3 +
 .../moby/buildkit/util/stack/stack.go | 182 ++
 .../moby/buildkit/util/stack/stack.pb.go | 261 ++
 .../moby/buildkit/util/stack/stack.proto | 17 +
 .../moby/sys/mountinfo/mountinfo_linux.go | 50 +-
 vendor/github.com/moby/sys/user/LICENSE | 202 ++
 .../sys}/user/lookup_unix.go | 0
 .../libcontainer => moby/sys}/user/user.go | 0
 .../sys}/user/user_fuzzer.go | 0
 vendor/github.com/opencontainers/runc/NOTICE | 4 +-
 .../libcontainer/apparmor/apparmor_linux.go | 15 +-
 .../runc/libcontainer/configs/blkio_device.go | 8 +-
 .../runc/libcontainer/configs/cgroup_linux.go | 11 +
 .../runc/libcontainer/configs/config.go | 122 +-
 .../runc/libcontainer/configs/config_linux.go | 31 +-
 .../runc/libcontainer/configs/mount.go | 43 +-
 .../runc/libcontainer/configs/mount_linux.go | 66 +
 .../libcontainer/configs/mount_unsupported.go | 10 +
 .../libcontainer/configs/namespaces_linux.go | 7 +
 .../configs/namespaces_syscall.go | 13 +
 .../libcontainer/user/lookup_deprecated.go | 81 +
 .../runc/libcontainer/user/user_deprecated.go | 146 +
 .../runc/libcontainer/userns/userns.go | 1 -
 .../runc/libcontainer/userns/userns_fuzzer.go | 11 +-
 .../runc/libcontainer/userns/userns_linux.go | 44 +-
 .../libcontainer/userns/userns_unsupported.go | 4 +-
 .../libcontainer/userns/usernsfd_linux.go | 156 ++
 .../runc/libcontainer/utils/cmsg.go | 85 +-
 .../runc/libcontainer/utils/utils.go | 58 +-
 .../runc/libcontainer/utils/utils_unix.go | 154 +-
 .../runtime-spec/specs-go/config.go | 10 +
 .../runtime-spec/specs-go/version.go | 2 +-
 .../runtime-tools/generate/generate.go | 2 +-
 .../generate/seccomp/parse_action.go | 6 +-
 .../generate/seccomp/seccomp_default.go | 2 -
 .../openshift/imagebuilder/Makefile | 6 +
 .../openshift/imagebuilder/builder.go | 26 +-
 .../openshift/imagebuilder/dispatchers.go | 116 +-
 .../imagebuilder/dockerfile/parser/parser.go | 128 +-
 .../openshift/imagebuilder/evaluator.go | 13 +-
 .../openshift/imagebuilder/imagebuilder.spec | 2 +-
 .../pkg/generated/models/cose_v001_schema.go | 12 +-
 vendor/github.com/spf13/cobra/.golangci.yml | 8 +-
 vendor/github.com/spf13/cobra/README.md | 8 +-
 vendor/github.com/spf13/cobra/active_help.go | 10 +-
 vendor/github.com/spf13/cobra/active_help.md | 157 --
 .../spf13/cobra/bash_completions.go | 2 +-
 .../spf13/cobra/bash_completions.md | 93 -
 .../spf13/cobra/bash_completionsV2.go | 2 +-
 vendor/github.com/spf13/cobra/cobra.go | 13 +-
 vendor/github.com/spf13/cobra/command.go | 69 +-
 vendor/github.com/spf13/cobra/completions.go | 29 +-
 .../spf13/cobra/fish_completions.go | 2 +-
 .../spf13/cobra/fish_completions.md | 4 -
 vendor/github.com/spf13/cobra/flag_groups.go | 68 +-
 .../spf13/cobra/powershell_completions.go | 6 +-
 .../spf13/cobra/powershell_completions.md | 3 -
 .../spf13/cobra/projects_using_cobra.md | 64 -
 .../spf13/cobra/shell_completions.md | 576 ----
 vendor/github.com/spf13/cobra/user_guide.md | 726 -----
 .../github.com/spf13/cobra/zsh_completions.md | 48 -
 .../vanilla-os/prometheus/.devcontainer.json | 3 +
 .../vanilla-os/prometheus/client.go | 118 +-
 .../github.com/vanilla-os/prometheus/main.go | 4 -
 vendor/github.com/vbauerster/mpb/v8/README.md | 6 +-
 vendor/github.com/vbauerster/mpb/v8/bar.go | 21 +-
 .../vbauerster/mpb/v8/decor/decorator.go | 27 +-
 .../github.com/vbauerster/mpb/v8/progress.go | 74 +-
 .../bson/bsoncodec/codec_cache.go | 166 ++
 .../bson/bsoncodec/default_value_decoders.go | 2 +-
 .../bson/bsoncodec/pointer_codec.go | 59 +-
 .../mongo-driver/bson/bsoncodec/registry.go | 163 +-
 .../bson/bsoncodec/slice_codec.go | 2 +-
 .../bson/bsoncodec/struct_codec.go | 105 +-
 .../mongo-driver/bson/bsoncodec/types.go | 1 +
 .../mongo-driver/bson/bsonrw/copier.go | 8 +-
 .../mongo-driver/bson/bsonrw/value_reader.go | 12 +-
 .../mongo-driver/bson/bsonrw/value_writer.go | 74 +-
 .../mongo-driver/bson/bsontype/bsontype.go | 1 +
 .../mongo-driver/bson/marshal.go | 32 +-
 .../go.mongodb.org/mongo-driver/bson/raw.go | 11 +-
 .../go.mongodb.org/mongo-driver/bson/types.go | 1 +
 .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 2 +-
 vendor/golang.org/x/crypto/argon2/argon2.go | 283 ++
 vendor/golang.org/x/crypto/argon2/blake2b.go | 53 +
 .../x/crypto/argon2/blamka_amd64.go | 60 +
 .../golang.org/x/crypto/argon2/blamka_amd64.s | 243 ++
 .../x/crypto/argon2/blamka_generic.go | 163 ++
 .../golang.org/x/crypto/argon2/blamka_ref.go | 15 +
 vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 ++
 .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 +
 .../x/crypto/blake2b/blake2bAVX2_amd64.s | 744 +++++
 .../x/crypto/blake2b/blake2b_amd64.s | 278 ++
 .../x/crypto/blake2b/blake2b_generic.go | 182 ++
 .../x/crypto/blake2b/blake2b_ref.go | 11 +
 vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 ++
 .../golang.org/x/crypto/blake2b/register.go | 30 +
 .../x/crypto/internal/poly1305/bits_compat.go | 39 -
 .../x/crypto/internal/poly1305/bits_go1.13.go | 21 -
 .../x/crypto/internal/poly1305/sum_generic.go | 43 +-
 .../x/crypto/internal/poly1305/sum_ppc64le.s | 14 +-
 vendor/golang.org/x/crypto/ocsp/ocsp.go | 13 +-
 .../x/crypto/ripemd160/ripemd160.go | 124 +
 .../x/crypto/ripemd160/ripemd160block.go | 165 ++
 vendor/golang.org/x/crypto/twofish/twofish.go | 341 +++
 vendor/golang.org/x/crypto/xts/xts.go | 164 ++
 vendor/golang.org/x/net/http2/databuffer.go | 59 +-
 vendor/golang.org/x/net/http2/frame.go | 42 +-
 vendor/golang.org/x/net/http2/go111.go | 30 -
 vendor/golang.org/x/net/http2/go115.go | 27 -
 vendor/golang.org/x/net/http2/go118.go | 17 -
 vendor/golang.org/x/net/http2/not_go111.go | 21 -
 vendor/golang.org/x/net/http2/not_go115.go | 31 -
 vendor/golang.org/x/net/http2/not_go118.go | 17 -
 vendor/golang.org/x/net/http2/pipe.go | 11 +-
 vendor/golang.org/x/net/http2/server.go | 37 +-
 vendor/golang.org/x/net/http2/testsync.go | 331 +++
 vendor/golang.org/x/net/http2/transport.go | 340 ++-
 vendor/golang.org/x/net/idna/go118.go | 1 -
 vendor/golang.org/x/net/idna/idna10.0.0.go | 1 -
 vendor/golang.org/x/net/idna/idna9.0.0.go | 1 -
 vendor/golang.org/x/net/idna/pre_go118.go | 1 -
 vendor/golang.org/x/net/idna/tables10.0.0.go | 1 -
 vendor/golang.org/x/net/idna/tables11.0.0.go | 1 -
 vendor/golang.org/x/net/idna/tables12.0.0.go | 1 -
 vendor/golang.org/x/net/idna/tables13.0.0.go | 1 -
 vendor/golang.org/x/net/idna/tables15.0.0.go | 1 -
 vendor/golang.org/x/net/idna/tables9.0.0.go | 1 -
 vendor/golang.org/x/net/idna/trie12.0.0.go | 1 -
 vendor/golang.org/x/net/idna/trie13.0.0.go | 1 -
 vendor/golang.org/x/sync/errgroup/errgroup.go | 3 +
 vendor/golang.org/x/sys/execabs/execabs.go | 102 -
 .../golang.org/x/sys/execabs/execabs_go118.go | 17 -
 .../golang.org/x/sys/execabs/execabs_go119.go | 20 -
 vendor/golang.org/x/sys/unix/aliases.go | 2 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 39 +-
 .../x/sys/unix/syscall_darwin_libSystem.go | 2 +-
 .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 99 +
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 90 +-
 .../x/sys/unix/zerrors_linux_386.go | 3 +
 .../x/sys/unix/zerrors_linux_amd64.go | 3 +
 .../x/sys/unix/zerrors_linux_arm.go | 3 +
 .../x/sys/unix/zerrors_linux_arm64.go | 3 +
 .../x/sys/unix/zerrors_linux_loong64.go | 3 +
 .../x/sys/unix/zerrors_linux_mips.go | 3 +
 .../x/sys/unix/zerrors_linux_mips64.go | 3 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 3 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 3 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 3 +
 .../x/sys/unix/zerrors_linux_s390x.go | 3 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 3 +
 .../golang.org/x/sys/unix/zsyscall_linux.go | 10 +
 .../x/sys/unix/zsyscall_openbsd_386.go | 2 -
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 -
 .../x/sys/unix/zsyscall_openbsd_arm.go | 2 -
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 2 -
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 2 -
 .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2 -
 .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2 -
 .../x/sys/unix/zsysnum_linux_386.go | 4 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 3 +
 .../x/sys/unix/zsysnum_linux_arm.go | 4 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 4 +
 .../x/sys/unix/zsysnum_linux_loong64.go | 4 +
 .../x/sys/unix/zsysnum_linux_mips.go | 4 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 4 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 4 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 4 +
 .../x/sys/unix/zsysnum_linux_ppc.go | 4 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 4 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 4 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 4 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 4 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 185 +-
 .../golang.org/x/sys/windows/env_windows.go | 17 +-
 .../x/sys/windows/syscall_windows.go | 4 +-
 .../x/sys/windows/zsyscall_windows.go | 9 +
 .../tools/go/internal/packagesdriver/sizes.go | 15 +-
 vendor/golang.org/x/tools/go/packages/doc.go | 34 +-
 .../x/tools/go/packages/external.go | 2 +-
 .../golang.org/x/tools/go/packages/golist.go | 76 +-
 .../x/tools/go/packages/golist_overlay.go | 492 ----
 .../x/tools/go/packages/packages.go | 180 +-
 .../x/tools/go/types/objectpath/objectpath.go | 131 +-
 .../x/tools/internal/event/keys/util.go | 21 +
 .../x/tools/internal/gcimporter/iexport.go | 31 +-
 .../x/tools/internal/gcimporter/iimport.go | 39 +-
 .../x/tools/internal/gocommand/invoke.go | 27 +-
 .../internal/packagesinternal/packages.go | 8 -
 .../x/tools/internal/typeparams/common.go | 24 +-
 .../x/tools/internal/typeparams/coretype.go | 8 +-
 .../internal/typeparams/enabled_go117.go | 12 -
 .../internal/typeparams/enabled_go118.go | 15 -
 .../x/tools/internal/typeparams/normalize.go | 20 +-
 .../internal/typeparams/typeparams_go117.go | 197 --
 .../internal/typeparams/typeparams_go118.go | 151 -
 .../internal/typesinternal/objectpath.go | 24 -
 .../x/tools/internal/versions/gover.go | 172 ++
 .../x/tools/internal/versions/types.go | 19 +
 .../x/tools/internal/versions/types_go121.go | 20 +
 .../x/tools/internal/versions/types_go122.go | 24 +
 .../tools/internal/versions/versions_go121.go | 49 +
 .../tools/internal/versions/versions_go122.go | 38 +
 ...r_conn_wrappers.go => balancer_wrapper.go} | 304 +-
 vendor/google.golang.org/grpc/clientconn.go | 494 ++--
 vendor/google.golang.org/grpc/codes/codes.go | 8 +-
 .../google.golang.org/grpc/credentials/tls.go | 75 +-
 vendor/google.golang.org/grpc/dialoptions.go | 48 +-
 .../grpc/internal/buffer/unbounded.go | 41 +-
 .../grpc/internal/channelz/funcs.go | 7 +
 .../grpc/internal/envconfig/envconfig.go | 3 -
 .../grpc/internal/envconfig/xds.go | 39 -
 .../grpc/internal/experimental.go | 28 +
 .../internal/grpcsync/callback_serializer.go | 51 +-
 .../grpc/internal/idle/idle.go | 175 +-
 .../grpc/internal/internal.go | 9 +-
 .../internal/resolver/dns/dns_resolver.go | 69 +-
 .../resolver/dns/internal/internal.go | 70 +
 .../grpc/internal/tcp_keepalive_nonunix.go | 29 +
 .../grpc/internal/tcp_keepalive_unix.go | 54 +
 .../grpc/internal/transport/handler_server.go | 65 +-
 .../grpc/internal/transport/http2_client.go | 12 +-
 .../grpc/internal/transport/http2_server.go | 107 +-
 .../grpc/internal/transport/proxy.go | 14 +-
 .../grpc/internal/transport/transport.go | 22 +-
 .../grpc/metadata/metadata.go | 18 +-
 vendor/google.golang.org/grpc/peer/peer.go | 2 +
 .../google.golang.org/grpc/picker_wrapper.go | 21 +-
 vendor/google.golang.org/grpc/pickfirst.go | 14 -
 .../grpc/resolver/dns/dns_resolver.go | 36 +
 vendor/google.golang.org/grpc/resolver/map.go | 113 +
 .../grpc/resolver/resolver.go | 10 +-
 .../grpc/resolver_conn_wrapper.go | 247 --
 .../grpc/resolver_wrapper.go | 197 ++
 vendor/google.golang.org/grpc/server.go | 223 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 166 +-
 .../protobuf/encoding/protojson/decode.go | 38 +-
 .../protobuf/encoding/protojson/doc.go | 2 +-
 .../protobuf/encoding/protojson/encode.go | 39 +-
 .../encoding/protojson/well_known_types.go | 59 +-
 .../protobuf/encoding/prototext/decode.go | 8 +-
 .../protobuf/encoding/prototext/encode.go | 4 +-
 .../protobuf/encoding/protowire/wire.go | 28 +-
 .../protobuf/internal/descfmt/stringer.go | 183 +-
 .../internal/editiondefaults/defaults.go | 12 +
 .../editiondefaults/editions_defaults.binpb | 4 +
 .../protobuf/internal/encoding/json/decode.go | 2 +-
 .../protobuf/internal/filedesc/desc.go | 102 +-
 .../protobuf/internal/filedesc/desc_init.go | 52 +
 .../protobuf/internal/filedesc/desc_lazy.go | 28 +
 .../protobuf/internal/filedesc/editions.go | 142 +
 .../protobuf/internal/genid/descriptor_gen.go | 364 ++-
 .../internal/genid/go_features_gen.go | 31 +
 .../protobuf/internal/genid/struct_gen.go | 5 +
 .../protobuf/internal/genid/type_gen.go | 38 +
 .../protobuf/internal/impl/codec_extension.go | 22 +-
 .../protobuf/internal/impl/codec_gen.go | 113 +-
 .../protobuf/internal/impl/codec_tables.go | 2 +-
 .../protobuf/internal/impl/legacy_message.go | 19 +-
 .../protobuf/internal/impl/message.go | 17 +-
 .../internal/impl/message_reflect_field.go | 2 +-
 .../protobuf/internal/impl/pointer_reflect.go | 36 +
 .../protobuf/internal/impl/pointer_unsafe.go | 40 +
 .../protobuf/internal/strs/strings.go | 2 +-
 ...ings_unsafe.go => strings_unsafe_go120.go} | 4 +-
 .../internal/strs/strings_unsafe_go121.go | 74 +
 .../protobuf/internal/version/version.go | 2 +-
 .../protobuf/proto/decode.go | 2 +-
 .../google.golang.org/protobuf/proto/doc.go | 58 +-
 .../protobuf/proto/encode.go | 2 +-
 .../protobuf/proto/extension.go | 2 +-
 .../google.golang.org/protobuf/proto/merge.go | 2 +-
 .../google.golang.org/protobuf/proto/proto.go | 18 +-
 .../protobuf/reflect/protodesc/desc.go | 29 +-
 .../protobuf/reflect/protodesc/desc_init.go | 56 +
 .../reflect/protodesc/desc_resolve.go | 4 +-
 .../reflect/protodesc/desc_validate.go | 6 +-
 .../protobuf/reflect/protodesc/editions.go | 148 +
 .../protobuf/reflect/protodesc/proto.go | 18 +-
 .../protobuf/reflect/protoreflect/proto.go | 85 +-
 .../reflect/protoreflect/source_gen.go | 64 +-
 .../protobuf/reflect/protoreflect/type.go | 44 +-
 .../protobuf/reflect/protoreflect/value.go | 24 +-
 .../reflect/protoreflect/value_equal.go | 8 +-
 .../reflect/protoreflect/value_union.go | 44 +-
 ...{value_unsafe.go => value_unsafe_go120.go} | 4 +-
 .../protoreflect/value_unsafe_go121.go | 87 +
 .../reflect/protoregistry/registry.go | 24 +-
 .../types/descriptorpb/descriptor.pb.go | 2475 ++++++++++++-----
 .../types/gofeaturespb/go_features.pb.go | 177 ++
 .../types/gofeaturespb/go_features.proto | 28 +
 .../protobuf/types/known/anypb/any.pb.go | 3 +-
 .../gopkg.in/go-jose/go-jose.v2/CHANGELOG.md | 84 +
 vendor/gopkg.in/go-jose/go-jose.v2/README.md | 120 +-
 .../gopkg.in/go-jose/go-jose.v2/asymmetric.go | 3 +
 vendor/gopkg.in/go-jose/go-jose.v2/crypter.go | 6 +
 .../gopkg.in/go-jose/go-jose.v2/encoding.go | 21 +-
 .../gopkg.in/go-jose/go-jose.v2/symmetric.go | 5 +
 vendor/modules.txt | 191 +-
 .../container-device-interface/LICENSE | 201 ++
 .../pkg/parser/parser.go | 0
 702 files changed, 31778 insertions(+), 11856 deletions(-)
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go
 create mode 100644 vendor/github.com/aead/serpent/.gitignore
 create mode 100644 vendor/github.com/aead/serpent/LICENSE
 create mode 100644 vendor/github.com/aead/serpent/README.md
 create mode 100644 vendor/github.com/aead/serpent/sbox_ref.go
 create mode 100644 vendor/github.com/aead/serpent/serpent.go
 create mode 100644 vendor/github.com/aead/serpent/serpent_ref.go
 create mode 100644 vendor/github.com/containerd/typeurl/v2/.gitignore
 create mode 100644 vendor/github.com/containerd/typeurl/v2/LICENSE
 create mode 100644 vendor/github.com/containerd/typeurl/v2/README.md
 create mode 100644 vendor/github.com/containerd/typeurl/v2/doc.go
 create mode 100644 vendor/github.com/containerd/typeurl/v2/types.go
 delete mode 100644 vendor/github.com/containers/buildah/.packit.sh
 delete mode 100644 vendor/github.com/containers/buildah/buildah.spec.rpkg
 create mode 100644 vendor/github.com/containers/buildah/convertcw.go
 rename vendor/github.com/containers/buildah/{util/util_not_uint64.go => copier/hardlink_not_uint64.go} (68%)
 rename vendor/github.com/containers/buildah/{util/util_uint64.go => copier/hardlink_uint64.go} (95%)
 create mode 100644 vendor/github.com/containers/buildah/copier/hardlink_unix.go
 create mode 100644 vendor/github.com/containers/buildah/copier/hardlink_windows.go
 create mode 100644 vendor/github.com/containers/buildah/internal/config/convert.go
 create mode 100644 vendor/github.com/containers/buildah/internal/config/executor.go
 create mode 100644 vendor/github.com/containers/buildah/internal/config/override.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/archive.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/attest.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/luks.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/makefs.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/types/attest.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/types/workload.go
 create mode 100644 vendor/github.com/containers/buildah/internal/mkcw/workload.go
 create mode 100644 vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go
 create mode 100644 vendor/github.com/containers/buildah/internal/volumes/volumes.go
 delete mode 100644 vendor/github.com/containers/buildah/libdm_tag.sh
 create mode 100644 vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go
 create mode 100644 vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go
 create mode 100644 vendor/github.com/containers/buildah/pkg/util/resource_unix.go
 create mode 100644 vendor/github.com/containers/buildah/pkg/util/resource_windows.go
 delete mode 100644 vendor/github.com/containers/buildah/selinux_tag.sh
 create mode 100644 vendor/github.com/containers/common/internal/attributedstring/slice.go
 create mode 100644 vendor/github.com/containers/common/libimage/define/platform.go
 create mode 100644 vendor/github.com/containers/common/libimage/platform/platform.go
 create mode 100644 vendor/github.com/containers/common/libnetwork/pasta/pasta.go
 create mode 100644 vendor/github.com/containers/common/libnetwork/slirp4netns/const.go
 create mode 100644 vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go
 create mode 100644 vendor/github.com/containers/common/pkg/auth/auth.go
 create mode 100644 vendor/github.com/containers/common/pkg/auth/cli.go
 create mode 100644 vendor/github.com/containers/common/pkg/completion/completion.go
 create mode 100644 vendor/github.com/containers/common/pkg/password/password_supported.go
 create mode 100644 vendor/github.com/containers/common/pkg/password/password_windows.go
 create mode 100644 vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go
 create mode 100644 vendor/github.com/containers/common/pkg/servicereaper/service.go
 delete mode 100644 vendor/github.com/containers/common/pkg/util/copy.go
 create mode 100644 vendor/github.com/containers/common/pkg/version/version.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
 create mode 100644 vendor/github.com/containers/luksy/.cirrus.yml
 create mode 100644 vendor/github.com/containers/luksy/.dockerignore
 create mode 100644 vendor/github.com/containers/luksy/.gitignore
 create mode 100644 vendor/github.com/containers/luksy/Dockerfile
 rename vendor/github.com/{container-orchestrated-devices/container-device-interface => containers/luksy}/LICENSE (100%)
 create mode 100644 vendor/github.com/containers/luksy/Makefile
 create mode 100644 vendor/github.com/containers/luksy/OWNERS
 create mode 100644 vendor/github.com/containers/luksy/README.md
 create mode 100644 vendor/github.com/containers/luksy/decrypt.go
 create mode 100644 vendor/github.com/containers/luksy/encrypt.go
 create mode 100644 vendor/github.com/containers/luksy/encryption.go
 create mode 100644 vendor/github.com/containers/luksy/luks.go
 create mode 100644 vendor/github.com/containers/luksy/tune.go
 create mode 100644 vendor/github.com/containers/luksy/v1header.go
 create mode 100644 vendor/github.com/containers/luksy/v2header.go
 create mode 100644 vendor/github.com/containers/luksy/v2json.go
 delete mode 100644 vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
 delete mode 100644 vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
 rename vendor/github.com/docker/go-connections/tlsconfig/{certpool_go17.go => certpool.go} (95%)
 delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
 delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
 delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go
 delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/versions_other.go
 delete mode 100644 vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/SECURITY.md
 delete mode 100644 vendor/github.com/go-openapi/analysis/appveyor.yml
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/loads/TODO.md
 delete mode 100644 vendor/github.com/go-openapi/spec/appveyor.yml
 delete mode 100644 vendor/github.com/go-openapi/spec/bindata.go
 create mode 100644 vendor/github.com/go-openapi/spec/embed.go
 create mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
 create mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json
 delete mode 100644 vendor/github.com/go-openapi/spec/url_go18.go
 rename vendor/github.com/go-openapi/swag/{post_go19.go => initialism_index.go} (98%)
 delete mode 100644 vendor/github.com/go-openapi/swag/post_go18.go
 delete mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go
 delete mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go
 delete mode 100644 vendor/github.com/go-openapi/validate/appveyor.yml
 create mode 100644 vendor/github.com/google/uuid/version6.go
 create mode 100644 vendor/github.com/google/uuid/version7.go
 create mode 100644 vendor/github.com/gorilla/mux/.editorconfig
 create mode 100644 vendor/github.com/gorilla/mux/.gitignore
 delete mode 100644 vendor/github.com/gorilla/mux/AUTHORS
 create mode 100644 vendor/github.com/gorilla/mux/Makefile
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go
 create mode 100644 vendor/github.com/moby/buildkit/AUTHORS
 create mode 100644 vendor/github.com/moby/buildkit/LICENSE
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go
 create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest
 create mode 100644 vendor/github.com/moby/buildkit/util/stack/generate.go
 create mode 100644 vendor/github.com/moby/buildkit/util/stack/stack.go
 create mode 100644 vendor/github.com/moby/buildkit/util/stack/stack.pb.go
 create mode 100644 vendor/github.com/moby/buildkit/util/stack/stack.proto
 create mode 100644 vendor/github.com/moby/sys/user/LICENSE
 rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/lookup_unix.go (100%)
 rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/user.go (100%)
 rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/user_fuzzer.go (100%)
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/usernsfd_linux.go
 delete mode 100644 vendor/github.com/spf13/cobra/active_help.md
 delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/fish_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md
 delete mode 100644 vendor/github.com/spf13/cobra/shell_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/user_guide.md
 delete mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
 create mode 100644 vendor/github.com/vanilla-os/prometheus/.devcontainer.json
 create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go
 create mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go
 create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go
 create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go
 create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go
 create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
 create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160.go
 create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
 create mode 100644 vendor/golang.org/x/crypto/twofish/twofish.go
 create mode 100644 vendor/golang.org/x/crypto/xts/xts.go
 delete mode 100644 vendor/golang.org/x/net/http2/go111.go
 delete mode 100644 vendor/golang.org/x/net/http2/go115.go
 delete mode 100644 vendor/golang.org/x/net/http2/go118.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go115.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go118.go
 create mode 100644 vendor/golang.org/x/net/http2/testsync.go
 delete mode 100644 vendor/golang.org/x/sys/execabs/execabs.go
 delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go
 delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go
 create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/objectpath.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/types.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go121.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go122.go
 rename vendor/google.golang.org/grpc/{balancer_conn_wrappers.go => balancer_wrapper.go} (57%)
 create mode 100644 vendor/google.golang.org/grpc/internal/experimental.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go
 create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/resolver_wrapper.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
 create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
 rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%)
 create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
 rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%)
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
 create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
 create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md
 create mode 100644 vendor/tags.cncf.io/container-device-interface/LICENSE
 rename vendor/{github.com/container-orchestrated-devices => tags.cncf.io}/container-device-interface/pkg/parser/parser.go (100%)

diff --git a/go.mod b/go.mod
index 9b730b62..a14c3f91 100644
--- a/go.mod
+++ b/go.mod
@@ -3,8 +3,8 @@ module github.com/vanilla-os/albius
 go 1.21
 
 require (
-	github.com/containers/storage v1.50.2
-	github.com/vanilla-os/prometheus v0.1.6
+	github.com/containers/storage v1.51.0
+	github.com/vanilla-os/prometheus v1.0.0
 )
 
 require (
@@ -12,75 +12,79 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
+	github.com/Microsoft/hcsshim v0.12.0-rc.2 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
-	github.com/container-orchestrated-devices/container-device-interface v0.6.1 // indirect
-	github.com/containerd/cgroups/v3 v3.0.2 // indirect
+	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/containerd v1.7.11 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
+	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
-	github.com/containernetworking/plugins v1.3.0 // indirect
-	github.com/containers/buildah v1.30.0 // indirect
-	github.com/containers/common v0.56.0 // indirect
-	github.com/containers/image/v5 v5.28.0 // indirect
+	github.com/containernetworking/plugins v1.4.0 // indirect
+	github.com/containers/buildah v1.33.7 // indirect
+	github.com/containers/common v0.57.4 // indirect
+	github.com/containers/image/v5 v5.29.2 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
+	github.com/containers/luksy v0.0.0-20231127213545-c2b9b9dbf004 // indirect
 	github.com/containers/ocicrypt v1.1.9 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/distribution/reference v0.5.0 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v24.0.7+incompatible // indirect
+	github.com/docker/docker v24.0.9+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.0 // indirect
-	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+	github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/fsouza/go-dockerclient v1.10.0 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
-	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.4 // indirect
-	github.com/go-openapi/jsonpointer v0.20.0 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/loads v0.21.2 // indirect
-	github.com/go-openapi/runtime v0.26.0 // indirect
-	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.21.7 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
-	github.com/go-openapi/validate v0.22.1 // indirect
+	github.com/fsouza/go-dockerclient v1.10.1 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+	github.com/go-openapi/analysis v0.22.1 // indirect
+	github.com/go-openapi/errors v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.20.2 // indirect
+	github.com/go-openapi/jsonreference v0.20.4 // indirect
+	github.com/go-openapi/loads v0.21.5 // indirect
+	github.com/go-openapi/runtime v0.26.2 // indirect
+	github.com/go-openapi/spec v0.20.13 // indirect
+	github.com/go-openapi/strfmt v0.22.0 // indirect
+	github.com/go-openapi/swag v0.22.7 // indirect
+	github.com/go-openapi/validate v0.22.6 // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/go-containerregistry v0.16.1 // indirect
+	github.com/google/go-containerregistry v0.17.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
-	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/mux v1.8.0 // indirect
+	github.com/google/uuid v1.5.0 // indirect
+	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.2 // indirect
+	github.com/klauspost/compress v1.17.4 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20231103135357-6d8e6e74f818 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20240104140712-c1f7de06e9f8 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.19 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/buildkit v0.12.5 // indirect
 	github.com/moby/patternmatcher v0.6.0 // indirect
-	github.com/moby/sys/mountinfo v0.6.2 // indirect
+	github.com/moby/sys/mountinfo v0.7.1 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -88,50 +92,51 @@ require (
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
-	github.com/opencontainers/runc v1.1.12 // indirect
-	github.com/opencontainers/runtime-spec v1.1.0 // indirect
-	github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 // indirect
+	github.com/opencontainers/runc v1.2.0-rc.1 // indirect
+	github.com/opencontainers/runtime-spec v1.2.0 // indirect
+	github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
-	github.com/openshift/imagebuilder v1.2.5 // indirect
+	github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/sigstore/fulcio v1.4.3 // indirect
-	github.com/sigstore/rekor v1.3.3 // indirect
-	github.com/sigstore/sigstore v1.7.5 // indirect
+	github.com/sigstore/rekor v1.3.4 // indirect
+	github.com/sigstore/sigstore v1.8.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/spf13/cobra v1.7.0 // indirect
+	github.com/spf13/cobra v1.8.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
-	github.com/sylabs/sif/v2 v2.15.0 // indirect
+	github.com/sylabs/sif/v2 v2.15.1 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.6.2 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.1 // indirect
 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
 	go.etcd.io/bbolt v1.3.8 // indirect
-	go.mongodb.org/mongo-driver v1.12.1 // indirect
+	go.mongodb.org/mongo-driver v1.13.1 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	golang.org/x/crypto v0.17.0 // indirect
-	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
+	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect
 	golang.org/x/mod v0.14.0 // indirect
-	golang.org/x/net v0.17.0 // indirect
-	golang.org/x/sync v0.5.0 // indirect
-	golang.org/x/sys v0.15.0 // indirect
-	golang.org/x/term v0.15.0 // indirect
+	golang.org/x/net v0.23.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/term v0.18.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.14.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect
-	google.golang.org/grpc v1.59.0 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
-	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
+	golang.org/x/tools v0.16.1 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
+	google.golang.org/grpc v1.60.1 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
+	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	tags.cncf.io/container-device-interface v0.6.2 // indirect
 )
diff --git a/go.sum b/go.sum
index 96509f31..fbf51bcf 100644
--- a/go.sum
+++ b/go.sum
@@ -10,18 +10,16 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
-github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk=
+github.com/Microsoft/hcsshim v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -43,40 +41,41 @@ github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
 github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA=
-github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA=
-github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
-github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
 github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw=
 github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
+github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
+github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
 github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
-github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
-github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
-github.com/containers/buildah v1.30.0 h1:mdp2COGKFFEZNEGP8VZ5ITuUFVNPFoH+iK2sSesNfTA=
-github.com/containers/buildah v1.30.0/go.mod h1:lyMLZIevpAa6zSzjRl7z4lFJMCMQLFjfo56YIefaB/U=
-github.com/containers/common v0.56.0 h1:hysHUsEai1EkMXanU26UV55wMXns/a6AYmaFqJ4fEMY=
-github.com/containers/common v0.56.0/go.mod h1:IjaDdfUtcs2CfCcJMZxuut4XlvkTkY9Nlqkso9xCOq4=
-github.com/containers/image/v5 v5.28.0 h1:H4cWbdI88UA/mDb6SxMo3IxpmS1BSs/Kifvhwt9g048=
-github.com/containers/image/v5 v5.28.0/go.mod h1:9aPnNkwHNHgGl9VlQxXEshvmOJRbdRAc1rNDD6sP2eU=
+github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
+github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
+github.com/containers/buildah v1.33.7 h1:Y2kNea+hNNyZ74ppYFWmD0cLc/DwZ5A4NEUPQWPj5Zw=
+github.com/containers/buildah v1.33.7/go.mod h1:pphfdjrwtTWkuIy1aDyZMEVyMfmm0DsbvxLGxxEU1cM=
+github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g=
+github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw=
+github.com/containers/image/v5 v5.29.2 h1:b8U0XYWhaQbKucK73IbmSm8WQyKAhKDbAHQc45XlsOw=
+github.com/containers/image/v5 v5.29.2/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/luksy v0.0.0-20231127213545-c2b9b9dbf004 h1:FsoozXJsdo9dinHt+ojlCE9ohn3Zll5Y7C4OUpqEBBY=
+github.com/containers/luksy v0.0.0-20231127213545-c2b9b9dbf004/go.mod h1:ffHQGPqiNdmuTLRSprWamjeeeMjZA7t/q4QUd/OOrz4=
 github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
 github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
-github.com/containers/storage v1.50.2 h1:Fys4BjFUVNRBEXlO70hFI48VW4EXsgnGisTpk9tTMsE=
-github.com/containers/storage v1.50.2/go.mod h1:dpspZsUrcKD8SpTofvKWhwPDHD0MkO4Q7VE+oYdWkiA=
+github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
+github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
 github.com/coreos/go-systemd/v22 v22.5.0
h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= +github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -87,14 +86,18 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= -github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= -github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= +github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea h1:+4n+kUVbPdu6qMI9SUnSKMC+D50gNW4L7Lhk9tI2lVo= +github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units 
v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -105,81 +108,37 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fsouza/go-dockerclient v1.10.0 h1:ppSBsbR60I1DFbV4Ag7LlHlHakHFRNLk9XakATW1yVQ= -github.com/fsouza/go-dockerclient v1.10.0/go.mod h1:+iNzAW78AzClIBTZ6WFjkaMvOgz68GyCJ236b1opLTs= -github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= -github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.0/go.mod 
h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/fsouza/go-dockerclient v1.10.1 h1:bSU5Wu2ARdub+iv9VtoDsN8yBUI0vgflmshbeQLKhvc= +github.com/fsouza/go-dockerclient v1.10.1/go.mod h1:dyzGriw6v3pK4O4O1u/X+vXxDDsrnLLkCqYkcLsDq2k= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/analysis v0.22.1 h1:nBWSKtx/OQr6FADwy0AYDZFdT05Abv5eYWECByHEQMg= +github.com/go-openapi/analysis v0.22.1/go.mod h1:acDnkkCI2QxIo8sSIPgmp1wUlRohV7vfGtAIVae73b0= +github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= +github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= +github.com/go-openapi/runtime v0.26.2 h1:elWyB9MacRzvIVgAZCBJmqTi7hBzU0hlKD4IvfX0Zl0= +github.com/go-openapi/runtime v0.26.2/go.mod h1:O034jyRZ557uJKzngbMDJXkcKJVzXJiymdSfgejrcRw= +github.com/go-openapi/spec v0.20.13 h1:XJDIN+dLH6vqXgafnl5SUIMnzaChQ6QTo0/UPMbkIaE= +github.com/go-openapi/spec v0.20.13/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= +github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= +github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= +github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/validate v0.22.6 h1:+NhuwcEYpWdO5Nm4bmvhGLW0rt1Fcc532Mu3wpypXfo= +github.com/go-openapi/validate v0.22.6/go.mod 
h1:eaddXSqKeTg5XpSmj1dYyFTK/95n/XHwcOY+BMxKMyM= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -213,23 +172,23 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= -github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk= +github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -237,54 +196,39 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= 
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/letsencrypt/boulder v0.0.0-20231103135357-6d8e6e74f818 h1:3KAItcBZS9abipHoUx0ANzBp57VBXR+aTLDrQy+HY5I= -github.com/letsencrypt/boulder v0.0.0-20231103135357-6d8e6e74f818/go.mod h1:yMWB6x4w67wRFIWxTO8ic0131ROnE4dB9VJcwhP4M1Y= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/letsencrypt/boulder v0.0.0-20240104140712-c1f7de06e9f8 h1:VhZp18bDTiwcMExEJnuajfgtjMx03bh/NH7qIplYfdc= +github.com/letsencrypt/boulder v0.0.0-20240104140712-c1f7de06e9f8/go.mod h1:rS3/odUsNpJzEb3gCFNAL32rFXDS4kLeS28vd2x8NfQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod 
h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= -github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= @@ -292,16 +236,18 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= +github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -312,7 +258,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 
h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= @@ -322,32 +267,31 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= -github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= +github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= +github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= -github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 h1:NL4xDvl68WWqQ+8WPMM3l5PsZTxaT7Z4K3VSKDRuAGs= -github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69/go.mod h1:bNpfuSHA3DZRtD0TPWO8LzgtLpFPTVA/3jDkzD/OPyk= +github.com/opencontainers/runc v1.2.0-rc.1 h1:SMjop2pxxYRTfKdsigna/8xRoaoCfIQfD2cVuOb64/o= +github.com/opencontainers/runc v1.2.0-rc.1/go.mod h1:m9JwxfHzXz5YTTXBQr7EY9KTuazFAGPyMQx2nRR3vTw= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc= +github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= github.com/opencontainers/selinux v1.11.0 
h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift/imagebuilder v1.2.5 h1:dby0N3FTouXSBgWNf+gfTkj36fAb8g4iL/SRw1eNAoo= -github.com/openshift/imagebuilder v1.2.5/go.mod h1:bF4w79W8nM+jH1QkAiHSUVaqHkMBJGijafZxCJEHH5o= +github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510 h1:ILAESc7vHTVNKctTiR10XC+vACPlR4NbS6570G6QQmY= +github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510/go.mod h1:nOaQJMj7VZgdqATqES4GxZX/p6gwK2r7bpE3Ry63+jM= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -366,81 +310,65 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= -github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod 
h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= -github.com/sigstore/rekor v1.3.3 h1:pLZ0UjutL7SUdeiysmJCabnRqvI7DsIxnJj8c/+e0Fk= -github.com/sigstore/rekor v1.3.3/go.mod h1:GO3udo2Xiu3/Uz4/U3vgjVq7w5Yq7eSpAFP1z7gE+yA= -github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48= -github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sigstore/rekor v1.3.4 h1:RGIia1iOZU7fOiiP2UY/WFYhhp50S5aUm7YrM8aiA6E= +github.com/sigstore/rekor v1.3.4/go.mod h1:1GubPVO2yO+K0m0wt/3SHFqnilr/hWbsjSOe7Vzxrlg= +github.com/sigstore/sigstore v1.8.0 h1:sSRWXv1JiDsK4T2wNWVYcvKCgxcSrhQ/QUJxsfCO4OM= +github.com/sigstore/sigstore v1.8.0/go.mod h1:l12B1gFlLIpBIVeqk/q1Lb+6YSOGNuN3xLExIjYH+qc= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/sylabs/sif/v2 v2.15.0 
h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw= -github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc= +github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0= +github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vanilla-os/prometheus v0.1.6 h1:FU+Ts+Hxwz6C0uflD05s836CxKytclTD56ZHzgxpp70= -github.com/vanilla-os/prometheus v0.1.6/go.mod h1:A65eiUB5ZJa1PLJeEW7gLuNJKKXJBFIvXnSEa5rbMQE= +github.com/vanilla-os/prometheus v1.0.0 h1:je+vWK9Ir1SzCIz2yoSE3tFmGKli4Q1z/LQ5mNk/+Pc= +github.com/vanilla-os/prometheus v1.0.0/go.mod h1:CPWeG0LJW4gHcLHSkqx9jANi5574a03R3ihWNy4PIig= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA= -github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo= +github.com/vbauerster/mpb/v8 v8.7.1 h1:bQoSMMTFAg/gjsLrBYmO8gbRcZt7aDq6WI2IMa9BTqM= +github.com/vbauerster/mpb/v8 v8.7.1/go.mod h1:fWgXcAu4W+0cBSUh4ZlaKJyC2KtgU27ZSTaiIk0QNsQ= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -454,35 +382,30 @@ github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= -go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod 
v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -497,35 +420,28 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -536,7 +452,6 @@ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -544,39 +459,42 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod 
h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -586,15 +504,15 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -606,28 +524,21 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
-gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
+gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
+gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@@ -636,3 +547,5 @@ gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg=
+tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE=
diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
index abe77f57..7d38a2fb 100644
--- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml
+++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
@@ -20,6 +20,7 @@ linters:
  # - typecheck
  # - unused

+  - errorlint # error wrapping (e.g., not using `errors.Is`, using `%s` instead of `%w` in `fmt.Errorf`)
  - gofmt # whether code was gofmt-ed
  - govet # enabled by default, but just to be sure
  - nolintlint # ill-formed or insufficient nolint directives
@@ -53,6 +54,12 @@ issues:
      text: "^ST1003: should not use underscores in package names$"
      source: "^package cri_containerd$"

+  # don't bother with proper error wrapping in test code
+  - path: cri-containerd
+    linters:
+      - errorlint
+    text: "non-wrapping format verb for fmt.Errorf"
+
  # This repo has a LOT of generated schema files, operating system bindings, and other
  # things that ST1003 from stylecheck won't like (screaming case Windows api constants for example)
 # There's also some structs that we *could* change the initialisms to be Go friendly
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
index 5a136153..32043804 100644
--- a/vendor/github.com/Microsoft/hcsshim/README.md
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -9,15 +9,18 @@ It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd

 ## Building

 While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+
 ### Linux Hyper-V Container Guest Agent

 To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs.
+
 ```powershell
 C:\> $env:GOOS="linux"
 C:\> go build .\cmd\gcs\
 ```
 or on a Linux machine
+
 ```sh
 > go build ./cmd/gcs
 ```
@@ -33,13 +36,15 @@ make all
 ```

 If the build is successful, in the `./out` folder you should see:
+
 ```sh
 > ls ./out/
 delta.tar.gz initrd.img rootfs.tar.gz
 ```

 ### Containerd Shim
-For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
+
+For info, see the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).

 Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.

@@ -48,7 +53,9 @@ C:\> $env:GOOS="windows"
 C:\> go build .\cmd\containerd-shim-runhcs-v1
 ```

-Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running:
+Then place the binary in the same directory that Containerd is located at in your environment.
+A default Containerd configuration file can be generated by running:
+
 ```powershell
 .\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
 ```
@@ -56,6 +63,7 @@ Then place the binary in the same directory that Containerd is located at in you
 This config file will already have the shim set as the default runtime for cri interactions.

 To trial using the shim out with ctr.exe:
+
 ```powershell
 C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
 ```
@@ -64,16 +72,69 @@ C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/window

 This project welcomes contributions and suggestions. Most contributions require you to agree to a
 Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
-the rights to use your contribution. For details, visit https://cla.microsoft.com.
+the rights to use your contribution. For details, visit [Microsoft CLA](https://cla.microsoft.com).

 When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
 a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
 provided by the bot. You will only need to do this once across all repos using our CLA.

-We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
-certify they either authored the work themselves or otherwise have permission to use it in this project.
Please see https://developercertificate.org/ for
-more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
-that all commits in a given PR are signed-off.
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+Since `./test` is a separate Go module, the linter is run from both the root and the
+`test` directories. Additionally, the linter is run with `GOOS` set to both `windows` and
+`linux`.
+
+The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+    "go.lintTool": "golangci-lint",
+    "go.lintOnSave": "package",
+```
+
+Additional editor [integration options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed][lint-install] and run locally:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run
+```
+
+To run across the entire repo for both `GOOS=windows` and `linux`:
+
+```powershell
+> foreach ( $goos in ('windows', 'linux') ) {
+    foreach ( $repo in ('.', 'test') ) {
+        pwsh -Command "cd $repo && go env -w GOOS=$goos && golangci-lint.exe run --verbose"
+    }
+}
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+Similar to the [linting stage](#linting), `go generate` is run in both the root and test Go modules.
+
+This can be done via:
+
+```shell
+> go generate ./...
+> cd test && go generate ./...
+```

 ## Code of Conduct

@@ -83,7 +144,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio

 ## Dependencies

-This project requires Golang 1.17 or newer to build.
+This project requires Golang 1.18 or newer to build.

 For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).

@@ -100,3 +161,10 @@ For additional details, see [Report a Computer Security Vulnerability](https://t

 ---------------

 Copyright (c) 2018 Microsoft Corp. All rights reserved.
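
The error-wrapping convention that `errorlint` enforces is the pattern behind most of the small changes in this patch: wrap causes with `%w` and compare with `errors.Is` instead of `==`. A minimal, self-contained sketch of that pattern (the file name is purely illustrative, not from this patch):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// readConfig wraps the underlying error with %w so callers can still
// inspect the cause through the wrap chain.
func readConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return fmt.Errorf("read config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := readConfig("missing.toml")
	// errors.Is walks the wrap chain; a plain == comparison would miss
	// the wrapped *fs.PathError and report false here.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
	fmt.Println(errors.Is(err, io.EOF))         // false
}
```

Where a sentinel comparison is intentional (an error value returned verbatim and never wrapped), the vendored code opts out with `//nolint:errorlint` rather than rewriting the comparison, as the diffs below show.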
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
index 54c4b3bc..301a1088 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
@@ -38,3 +38,31 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L
 	}
 	return nil
 }
+
+// AttachOverlayFilter sets up a filter of the given type on a writable container layer. Currently the only
+// supported filter types are WCIFS and UnionFS (defined in internal/hcs/schema2/layer.go).
+//
+// `volumePath` is the volume path at which the writable layer is mounted. If the
+// path does not end in a `\` the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func AttachOverlayFilter(ctx context.Context, volumePath string, layerData LayerData) (err error) {
+	title := "hcsshim::AttachOverlayFilter"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("volumePath", volumePath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	err = hcsAttachOverlayFilter(volumePath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to attach overlay filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
index daf1bfff..6e00e4a1 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
@@ -4,7 +4,9 @@ package computestorage

 import (
 	"context"
+	"encoding/json"

+	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
 	"github.com/Microsoft/hcsshim/internal/oc"
 	"github.com/pkg/errors"
 	"go.opencensus.io/trace"
@@ -26,3 +28,27 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error)
 	}
 	return nil
 }
+
+// DetachOverlayFilter detaches the filter on a writable container layer.
+//
+// `volumePath` is a path to the writable container volume.
+func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcsschema.FileSystemFilterType) (err error) { + title := "hcsshim::DetachOverlayFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("volumePath", volumePath)) + + layerData := LayerData{} + layerData.FilterType = filterType + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsDetachOverlayFilter(volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to detach overlay filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go index c38d3aa5..5af931f2 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -19,14 +19,17 @@ import ( //sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? //sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? //sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? +//sys hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsAttachOverlayFilter? +//sys hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsDetachOverlayFilter? type Version = hcsschema.Version type Layer = hcsschema.Layer // LayerData is the data used to describe parent layer information. type LayerData struct { - SchemaVersion Version `json:"SchemaVersion,omitempty"` - Layers []Layer `json:"Layers,omitempty"` + SchemaVersion Version `json:"SchemaVersion,omitempty"` + Layers []Layer `json:"Layers,omitempty"` + FilterType hcsschema.FileSystemFilterType `json:"FilterType,omitempty"` } // ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. 
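
Taken together, the attach/detach additions and the new `FilterType` field suggest a call pattern like the following sketch. This is not from the patch: the helper name is hypothetical, and since `internal/hcs/schema2` is an internal package, code like this could only live inside the hcsshim module itself.

```go
//go:build windows

package wclayer_example

import (
	"context"

	"github.com/Microsoft/hcsshim/computestorage"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)

// withOverlayFilter attaches a UnionFS overlay filter to a mounted writable
// layer volume, runs fn, and detaches the filter again afterwards.
func withOverlayFilter(ctx context.Context, volumePath string, fn func() error) error {
	layerData := computestorage.LayerData{
		FilterType: hcsschema.UnionFS, // hcsschema.WCIFS is the other supported type
	}
	if err := computestorage.AttachOverlayFilter(ctx, volumePath, layerData); err != nil {
		return err
	}
	defer func() {
		// Detach takes only the filter type; it re-marshals a LayerData internally.
		_ = computestorage.DetachOverlayFilter(ctx, volumePath, layerData.FilterType)
	}()
	return fn()
}
```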
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index b996b35e..53d0beb8 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -43,8 +43,10 @@ var ( modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsAttachOverlayFilter = modcomputestorage.NewProc("HcsAttachOverlayFilter") procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer") procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsDetachOverlayFilter = modcomputestorage.NewProc("HcsDetachOverlayFilter") procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") @@ -83,6 +85,35 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro return } +func hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachOverlayFilter(_p0, _p1) +} + +func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { + hr = procHcsAttachOverlayFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsDestroyLayer(layerPath string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) @@ -131,6 +162,35 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { return } +func hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsDetachOverlayFilter(_p0, _p1) +} + +func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { + hr = procHcsDetachOverlayFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go index c8f09f88..0ad7f495 100644 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -75,7 +75,7 @@ func init() { func CreateContainer(id string, c *ContainerConfig) (Container, error) { fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) if err != nil { - return nil, fmt.Errorf("failed to merge 
additional JSON '%s': %s", createContainerAdditionalJSON, err) + return nil, fmt.Errorf("failed to merge additional JSON '%s': %w", createContainerAdditionalJSON, err) } system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go index 594bbfb7..b441b0cd 100644 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -115,6 +115,7 @@ func (e *ContainerError) Error() string { s += " encountered an error during " + e.Operation } + //nolint:errorlint // legacy code switch e.Err.(type) { case nil: break @@ -145,6 +146,7 @@ func (e *ProcessError) Error() string { s += " encountered an error during " + e.Operation } + //nolint:errorlint // legacy code switch e.Err.(type) { case nil: break @@ -166,10 +168,10 @@ func (e *ProcessError) Error() string { // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist // will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { - if _, ok := err.(EndpointNotFoundError); ok { + if _, ok := err.(EndpointNotFoundError); ok { //nolint:errorlint // legacy code return true } - if _, ok := err.(NetworkNotFoundError); ok { + if _, ok := err.(NetworkNotFoundError); ok { //nolint:errorlint // legacy code return true } return hcs.IsNotExist(getInnerError(err)) @@ -224,6 +226,7 @@ func IsAccessIsDenied(err error) bool { } func getInnerError(err error) error { + //nolint:errorlint // legacy code switch pe := err.(type) { case nil: return nil @@ -236,14 +239,14 @@ func getInnerError(err error) error { } func convertSystemError(err error, c *container) error { - if serr, ok := err.(*hcs.SystemError); ok { + if serr, ok := err.(*hcs.SystemError); ok { //nolint:errorlint // legacy code return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} } return err } func convertProcessError(err error, p *process) error { - if perr, ok := err.(*hcs.ProcessError); ok { + if perr, ok := err.(*hcs.ProcessError); ok { //nolint:errorlint // legacy code return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} } return err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 37afbf69..8ef611d6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -63,7 +63,7 @@ func (process *Process) SystemID() string { } func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { + switch err { //nolint:errorlint case nil: return true, nil case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go index 176c49d4..cb8dea08 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -9,6 +9,13 @@ package hcsschema +type FileSystemFilterType string + +const ( + UnionFS FileSystemFilterType = "UnionFS" + WCIFS FileSystemFilterType = "WCIFS" +) + type Layer struct { Id string `json:"Id,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go new file mode 100644 index 00000000..e7b605fd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go @@ -0,0 +1,13 @@ +package hcsschema + +// NOTE: manually added + +type RegistryHive string + +// List of RegistryHive +const ( + RegistryHive_SYSTEM RegistryHive = "System" + RegistryHive_SOFTWARE RegistryHive = "Software" + RegistryHive_SECURITY RegistryHive = "Security" + RegistryHive_SAM RegistryHive = "Sam" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go index 26fde99c..1883444a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -10,7 +10,7 @@ package hcsschema type RegistryKey struct { - Hive string `json:"Hive,omitempty"` + Hive RegistryHive `json:"Hive,omitempty"` Name string `json:"Name,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go index 3f203176..13f24d53 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -14,7 +14,7 @@ type RegistryValue struct { Name string `json:"Name,omitempty"` - Type_ string `json:"Type,omitempty"` + Type_ RegistryValueType `json:"Type,omitempty"` // One and only one value type must be set. StringValue string `json:"StringValue,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go new file mode 100644 index 00000000..c8b4f6c9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go @@ -0,0 +1,17 @@ +package hcsschema + +// NOTE: manually added + +type RegistryValueType string + +// List of RegistryValueType +const ( + RegistryValueType_NONE RegistryValueType = "None" + RegistryValueType_STRING RegistryValueType = "String" + RegistryValueType_EXPANDED_STRING RegistryValueType = "ExpandedString" + RegistryValueType_MULTI_STRING RegistryValueType = "MultiString" + RegistryValueType_BINARY RegistryValueType = "Binary" + RegistryValueType_D_WORD RegistryValueType = "DWord" + RegistryValueType_Q_WORD RegistryValueType = "QWord" + RegistryValueType_CUSTOM_TYPE RegistryValueType = "CustomType" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index cf1db7da..81d60ed4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -97,7 +97,7 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) if err != nil { - if err == ErrTimeout { + if errors.Is(err, ErrTimeout) { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. 
_ = computeSystem.Terminate(ctx) @@ -238,7 +238,7 @@ func (computeSystem *System) Shutdown(ctx context.Context) error { resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { + switch err { //nolint:errorlint case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: return makeSystemError(computeSystem, operation, err, events) @@ -259,7 +259,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error { resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { + switch err { //nolint:errorlint case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: return makeSystemError(computeSystem, operation, err, events) @@ -279,7 +279,7 @@ func (computeSystem *System) waitBackground() { span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { + switch err { //nolint:errorlint case nil: log.G(ctx).Debug("system exited") case ErrVmcomputeUnexpectedExit: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go index 0a8f36d8..e61dc8de 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -31,7 +31,7 @@ func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { func hnsCall(method, path, request string, returnResponse interface{}) error { hnsresponse, err := hnsCallRawResponse(method, path, request) if err != nil { - return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + return fmt.Errorf("failed during hnsCallRawResponse: %w", err) } if !hnsresponse.Success { return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go index 749588ad..a64b6792 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -56,7 +56,7 @@ func issueNamespaceRequest(id *string, method, subpath string, request interface if strings.Contains(err.Error(), "Element not found.") { return nil, os.ErrNotExist } - return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + return nil, fmt.Errorf("%s %s: %w", method, hnspath, err) } return &ns, err } @@ -86,7 +86,7 @@ func GetNamespaceEndpoints(id string) ([]string, error) { var endpoint namespaceEndpointRequest err = json.Unmarshal(rsrc.Data, &endpoint) if err != nil { - return nil, fmt.Errorf("unmarshal endpoint: %s", err) + return nil, fmt.Errorf("unmarshal endpoint: %w", err) } endpoints = append(endpoints, endpoint.ID) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go index bcca84b0..eae3cc50 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -4,6 +4,7 @@ package jobobject import ( "context" + "errors" "fmt" "sync" "unsafe" @@ -59,7 +60,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { }).Warn("failed to parse job object message") continue } - if err := 
msq.Enqueue(notification); err == queue.ErrQueueClosed { + if err := msq.Enqueue(notification); errors.Is(err, queue.ErrQueueClosed) { // Write will only return an error when the queue is closed. // The only time a queue would ever be closed is when we call `Close` on // the job it belongs to which also removes it from the jobMap, so something diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index 4a224fbe..10ae4d67 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -374,7 +374,7 @@ func (job *JobObject) Pids() ([]uint32, error) { return []uint32{}, nil } - if err != winapi.ERROR_MORE_DATA { + if err != winapi.ERROR_MORE_DATA { //nolint:errorlint return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index 03f71d9a..e3b1a1ed 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -143,6 +143,13 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { return err } info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + + // We really, really shouldn't be running on 32 bit, but just in case (and to satisfy CodeQL) ... + const maxUintptr = ^uintptr(0) + if affinityBitMask > uint64(maxUintptr) { + return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr) + } + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) return job.setExtendedInformation(info) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 6d69c15b..1ceb26ba 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -104,6 +104,7 @@ func encode(v interface{}) (_ []byte, err error) { if jErr := enc.Encode(v); jErr != nil { if err != nil { // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...) 
+ //nolint:errorlint // non-wrapping format verb for fmt.Errorf return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) } return nil, fmt.Errorf("json encoding: %w", jErr) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go index 3e175e52..cceb3e2d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -46,6 +46,7 @@ const ( ExpectedType = "expected-type" Bool = "bool" + Int32 = "int32" Uint32 = "uint32" Uint64 = "uint64" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go index 1ef5814d..6d39ca3b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go @@ -126,7 +126,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { // this means that there are no more regions for the current class, try expanding if nextCls != memCls { if err := pa.split(memCls); err != nil { - if err == ErrInvalidMemoryClass { + if errors.Is(err, ErrInvalidMemoryClass) { return nil, ErrNotEnoughSpace } return nil, err @@ -147,7 +147,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { } // Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into -// a bigger one +// a bigger one. func (pa *PoolAllocator) Release(reg MappedRegion) error { mp := pa.pools[reg.Type()] if mp == nil { @@ -164,7 +164,7 @@ func (pa *PoolAllocator) Release(reg MappedRegion) error { return ErrNotAllocated } if err := pa.merge(n.parent); err != nil { - if err != ErrEarlyMerge { + if !errors.Is(err, ErrEarlyMerge) { return err } } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index 70368533..b087b987 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -243,7 +243,7 @@ func RemoveRelative(path string, root *os.File) error { if err == nil { defer f.Close() err = deleteOnClose(f) - if err == syscall.ERROR_ACCESS_DENIED { + if err == syscall.ERROR_ACCESS_DENIED { //nolint:errorlint // Maybe the file is marked readonly. Clear the bit and retry. _ = clearReadOnly(f) err = deleteOnClose(f) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 79b14ef9..67ca897c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error }() select { case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { + if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint log.G(ctx).WithField(logfields.Timeout, trueTimeout). Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. 
" + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + @@ -150,7 +150,7 @@ func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration strin if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -205,7 +205,7 @@ func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -228,7 +228,7 @@ func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, opt if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -251,7 +251,7 @@ func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, op if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -274,7 +274,7 @@ func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -297,7 +297,7 @@ func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, optio if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -621,7 +621,7 @@ func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go index 792f13f5..807b7de1 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -64,7 +66,7 @@ func (r *baseLayerReader) walkUntilCancelled() error { return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } @@ -103,7 +105,7 @@ func (r *baseLayerReader) walkUntilCancelled() error { return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go index c542f556..d25c3c52 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
 package wclayer

 import (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
index e2ec27ad..35fcbedb 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
@@ -11,7 +11,6 @@ import (

 	"github.com/Microsoft/hcsshim/internal/hcserror"
 	"github.com/Microsoft/hcsshim/internal/oc"
-	"github.com/Microsoft/hcsshim/osversion"
 	"go.opencensus.io/trace"
 )

@@ -30,14 +29,17 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error
 		return hcserror.New(err, title, "")
 	}

-	// Manually expand the volume now in order to work around bugs in 19H1 and
-	// prerelease versions of Vb. Remove once this is fixed in Windows.
-	if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 {
-		err = expandSandboxVolume(ctx, path)
-		if err != nil {
-			return err
-		}
+	// Always expand the volume too. For legacy layers, skipping the expansion here would still
+	// work, because the PrepareLayer call handles it internally. However, in other cases (like
+	// CimFS) we don't call PrepareLayer, so the volume would never be expanded. This also means
+	// legacy layers may take a small perf hit, because the VHD is mounted twice for expansion
+	// (once here and once during the PrepareLayer call). But as long as that hit is minimal, we
+	// should be okay.
+	err = expandSandboxVolume(ctx, path)
+	if err != nil {
+		return err
 	}
+
 	return nil
 }
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
index 807d8331..fc12eeba 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
@@ -154,7 +154,7 @@ func (r *legacyLayerReader) walkUntilCancelled() error {
 		}
 		return nil
 	})
-	if err == errorIterationCanceled {
+	if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
 		return nil
 	}
 	if err == nil {
@@ -196,7 +196,7 @@ func findBackupStreamSize(r io.Reader) (int64, error) {
 	for {
 		hdr, err := br.Next()
 		if err != nil {
-			if err == io.EOF {
+			if errors.Is(err, io.EOF) {
 				err = nil
 			}
 			return 0, err
@@ -428,7 +428,7 @@ func (w *legacyLayerWriter) initUtilityVM() error {
 		// immutable.
err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles) if err != nil { - return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + return fmt.Errorf("cloning the parent utility VM image failed: %w", err) } w.HasUtilityVM = true } @@ -451,7 +451,7 @@ func (w *legacyLayerWriter) reset() error { for { bhdr, err := br.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { // end of backupstream data break } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go index d04bffc1..21664577 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import ( @@ -34,7 +36,7 @@ type CimFsFileMetadata struct { //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? //sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? -//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage? +//sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage? //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? //sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index b5b9fccc..ffd3cd7f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -184,18 +184,12 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } -func CimCloseImage(cimFSHandle FsHandle) (hr error) { - hr = procCimCloseImage.Find() - if hr != nil { +func CimCloseImage(cimFSHandle FsHandle) (err error) { + err = procCimCloseImage.Find() + if err != nil { return } - r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } + syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) return } diff --git a/vendor/github.com/aead/serpent/.gitignore b/vendor/github.com/aead/serpent/.gitignore new file mode 100644 index 00000000..9d3d8437 --- /dev/null +++ b/vendor/github.com/aead/serpent/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.vscode + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/aead/serpent/LICENSE b/vendor/github.com/aead/serpent/LICENSE new file mode 100644 index 00000000..b6a9210b --- /dev/null +++ b/vendor/github.com/aead/serpent/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Andreas Auernhammer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to 
permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/aead/serpent/README.md b/vendor/github.com/aead/serpent/README.md
new file mode 100644
index 00000000..6dbceee8
--- /dev/null
+++ b/vendor/github.com/aead/serpent/README.md
@@ -0,0 +1,9 @@
+[![Godoc Reference](https://godoc.org/github.com/aead/serpent?status.svg)](https://godoc.org/github.com/aead/serpent)
+
+## The Serpent block cipher
+
+Serpent is a symmetric key block cipher that was a finalist in the Advanced Encryption Standard (AES) contest,
+where it was ranked second to Rijndael. Serpent was designed by Ross Anderson, Eli Biham, and Lars Knudsen.
+
+### Installation
+Install in your GOPATH: `go get -u github.com/aead/serpent`
diff --git a/vendor/github.com/aead/serpent/sbox_ref.go b/vendor/github.com/aead/serpent/sbox_ref.go
new file mode 100644
index 00000000..515afc69
--- /dev/null
+++ b/vendor/github.com/aead/serpent/sbox_ref.go
@@ -0,0 +1,316 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+package serpent
+
+// The linear transformation of serpent.
+// This version tries not to minimize the
+// number of registers, but to maximize parallelism.
+func linear(v0, v1, v2, v3 *uint32) {
+	t0 := ((*v0 << 13) | (*v0 >> (32 - 13)))
+	t2 := ((*v2 << 3) | (*v2 >> (32 - 3)))
+	t1 := *v1 ^ t0 ^ t2
+	t3 := *v3 ^ t2 ^ (t0 << 3)
+	*v1 = (t1 << 1) | (t1 >> (32 - 1))
+	*v3 = (t3 << 7) | (t3 >> (32 - 7))
+	t0 ^= *v1 ^ *v3
+	t2 ^= *v3 ^ (*v1 << 7)
+	*v0 = (t0 << 5) | (t0 >> (32 - 5))
+	*v2 = (t2 << 22) | (t2 >> (32 - 22))
+}
+
+// The inverse linear transformation of serpent.
+// This version tries not to minimize the
+// number of registers, but to maximize parallelism.
+func linearInv(v0, v1, v2, v3 *uint32) {
+	t2 := (*v2 >> 22) | (*v2 << (32 - 22))
+	t0 := (*v0 >> 5) | (*v0 << (32 - 5))
+	t2 ^= *v3 ^ (*v1 << 7)
+	t0 ^= *v1 ^ *v3
+	t3 := (*v3 >> 7) | (*v3 << (32 - 7))
+	t1 := (*v1 >> 1) | (*v1 << (32 - 1))
+	*v3 = t3 ^ t2 ^ (t0 << 3)
+	*v1 = t1 ^ t0 ^ t2
+	*v2 = (t2 >> 3) | (t2 << (32 - 3))
+	*v0 = (t0 >> 13) | (t0 << (32 - 13))
+}
+
+// The following functions sb0, sb1, ..., sb7 represent the 8 Serpent S-Boxes.
+// sb0Inv to sb7Inv are the inverse functions (e.g. sb0Inv is the inverse of sb0,
+// and vice versa).
+// The S-Boxes differ from the original Serpent definitions. This is for
+// optimisation. The functions use the Serpent S-Box improvements (for non-x86)
+// from Dr. B. R. Gladman and Sam Simpson.
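
Before the S-box definitions below, a quick usage sketch of the vendored package as its public API appears in this diff (`NewCipher`, `BlockSize`, and the standard `cipher.Block` interface); the all-zero key and plaintext are purely illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/aead/serpent"
)

func main() {
	key := make([]byte, 32) // 16, 24, or 32 bytes select Serpent-128/-192/-256
	block, err := serpent.NewCipher(key)
	if err != nil {
		panic(err)
	}

	src := make([]byte, serpent.BlockSize) // exactly one 16-byte block
	dst := make([]byte, serpent.BlockSize)
	block.Encrypt(dst, src)

	out := make([]byte, serpent.BlockSize)
	block.Decrypt(out, dst)
	fmt.Println(bytes.Equal(out, src)) // true
}
```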
+ +// S-Box 0 +func sb0(r0, r1, r2, r3 *uint32) { + t0 := *r0 ^ *r3 + t1 := *r2 ^ t0 + t2 := *r1 ^ t1 + *r3 = (*r0 & *r3) ^ t2 + t3 := *r0 ^ (*r1 & t0) + *r2 = t2 ^ (*r2 | t3) + t4 := *r3 & (t1 ^ t3) + *r1 = (^t1) ^ t4 + *r0 = t4 ^ (^t3) +} + +// Inverse S-Box 0 +func sb0Inv(r0, r1, r2, r3 *uint32) { + t0 := ^(*r0) + t1 := *r0 ^ *r1 + t2 := *r3 ^ (t0 | t1) + t3 := *r2 ^ t2 + *r2 = t1 ^ t3 + t4 := t0 ^ (*r3 & t1) + *r1 = t2 ^ (*r2 & t4) + *r3 = (*r0 & t2) ^ (t3 | *r1) + *r0 = *r3 ^ (t3 ^ t4) +} + +// S-Box 1 +func sb1(r0, r1, r2, r3 *uint32) { + t0 := *r1 ^ (^(*r0)) + t1 := *r2 ^ (*r0 | t0) + *r2 = *r3 ^ t1 + t2 := *r1 ^ (*r3 | t0) + t3 := t0 ^ *r2 + *r3 = t3 ^ (t1 & t2) + t4 := t1 ^ t2 + *r1 = *r3 ^ t4 + *r0 = t1 ^ (t3 & t4) +} + +// Inverse S-Box 1 +func sb1Inv(r0, r1, r2, r3 *uint32) { + t0 := *r1 ^ *r3 + t1 := *r0 ^ (*r1 & t0) + t2 := t0 ^ t1 + *r3 = *r2 ^ t2 + t3 := *r1 ^ (t0 & t1) + t4 := *r3 | t3 + *r1 = t1 ^ t4 + t5 := ^(*r1) + t6 := *r3 ^ t3 + *r0 = t5 ^ t6 + *r2 = t2 ^ (t5 | t6) +} + +// S-Box 2 +func sb2(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v3 := *r3 // save r3 + t0 := ^v0 + t1 := *r1 ^ v3 + t2 := *r2 & t0 + *r0 = t1 ^ t2 + t3 := *r2 ^ t0 + t4 := *r2 ^ *r0 + t5 := *r1 & t4 + *r3 = t3 ^ t5 + *r2 = v0 ^ ((v3 | t5) & (*r0 | t3)) + *r1 = (t1 ^ *r3) ^ (*r2 ^ (v3 | t0)) +} + +// Inverse S-Box 2 +func sb2Inv(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v3 := *r3 // save r3 + t0 := *r1 ^ v3 + t1 := ^t0 + t2 := v0 ^ *r2 + t3 := *r2 ^ t0 + t4 := *r1 & t3 + *r0 = t2 ^ t4 + t5 := v0 | t1 + t6 := v3 ^ t5 + t7 := t2 | t6 + *r3 = t0 ^ t7 + t8 := ^t3 + t9 := *r0 | *r3 + *r1 = t8 ^ t9 + *r2 = (v3 & t8) ^ (t2 ^ t9) +} + +// S-Box 3 +func sb3(r0, r1, r2, r3 *uint32) { + v1 := *r1 // save r1 + v3 := *r3 // save r3 + t0 := *r0 ^ *r1 + t1 := *r0 & *r2 + t2 := *r0 | *r3 + t3 := *r2 ^ *r3 + t4 := t0 & t2 + t5 := t1 | t4 + *r2 = t3 ^ t5 + t6 := *r1 ^ t2 + t7 := t5 ^ t6 + t8 := t3 & t7 + *r0 = t0 ^ t8 + t9 := *r2 & *r0 + *r1 = t7 ^ t9 + *r3 = (v1 | v3) ^ (t3 ^ t9) +} + +// Inverse S-Box 3 +func sb3Inv(r0, r1, r2, r3 *uint32) { + t0 := *r0 | *r1 + t1 := *r1 ^ *r2 + t2 := *r1 & t1 + t3 := *r0 ^ t2 + t4 := *r2 ^ t3 + t5 := *r3 | t3 + *r0 = t1 ^ t5 + t6 := t1 | t5 + t7 := *r3 ^ t6 + *r2 = t4 ^ t7 + t8 := t0 ^ t7 + t9 := *r0 & t8 + *r3 = t3 ^ t9 + *r1 = *r3 ^ (*r0 ^ t8) +} + +// S-Box 4 +func sb4(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + t0 := v0 ^ *r3 + t1 := *r3 & t0 + t2 := *r2 ^ t1 + t3 := *r1 | t2 + *r3 = t0 ^ t3 + t4 := ^(*r1) + t5 := t0 | t4 + *r0 = t2 ^ t5 + t6 := v0 & *r0 + t7 := t0 ^ t4 + t8 := t3 & t7 + *r2 = t6 ^ t8 + *r1 = (v0 ^ t2) ^ (t7 & *r2) +} + +// Inverse S-Box 4 +func sb4Inv(r0, r1, r2, r3 *uint32) { + v3 := *r3 // save r3 + t0 := *r2 | v3 + t1 := *r0 & t0 + t2 := *r1 ^ t1 + t3 := *r0 & t2 + t4 := *r2 ^ t3 + *r1 = v3 ^ t4 + t5 := ^(*r0) + t6 := t4 & *r1 + *r3 = t2 ^ t6 + t7 := *r1 | t5 + t8 := v3 ^ t7 + *r0 = *r3 ^ t8 + *r2 = (t2 & t8) ^ (*r1 ^ t5) +} + +// S-Box 5 +func sb5(r0, r1, r2, r3 *uint32) { + v1 := *r1 // save r1 + t0 := ^(*r0) + t1 := *r0 ^ v1 + t2 := *r0 ^ *r3 + t3 := *r2 ^ t0 + t4 := t1 | t2 + *r0 = t3 ^ t4 + t5 := *r3 & *r0 + t6 := t1 ^ *r0 + *r1 = t5 ^ t6 + t7 := t0 | *r0 + t8 := t1 | t5 + t9 := t2 ^ t7 + *r2 = t8 ^ t9 + *r3 = (v1 ^ t5) ^ (*r1 & t9) +} + +// Inverse S-Box 5 +func sb5Inv(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v1 := *r1 // save r1 + v3 := *r3 // save r3 + t0 := ^(*r2) + t1 := v1 & t0 + t2 := v3 ^ t1 + t3 := v0 & t2 + t4 := v1 ^ t0 + *r3 = t3 ^ t4 + t5 := v1 | *r3 + t6 := v0 & t5 + *r1 = t2 ^ t6 + t7 := v0 | v3 + t8 := 
t0 ^ t5
+	*r0 = t7 ^ t8
+	*r2 = (v1 & t7) ^ (t3 | (v0 ^ *r2))
+}
+
+// S-Box 6
+func sb6(r0, r1, r2, r3 *uint32) {
+	t0 := ^(*r0)
+	t1 := *r0 ^ *r3
+	t2 := *r1 ^ t1
+	t3 := t0 | t1
+	t4 := *r2 ^ t3
+	*r1 = *r1 ^ t4
+	t5 := t1 | *r1
+	t6 := *r3 ^ t5
+	t7 := t4 & t6
+	*r2 = t2 ^ t7
+	t8 := t4 ^ t6
+	*r0 = *r2 ^ t8
+	*r3 = (^t4) ^ (t2 & t8)
+}
+
+// Inverse S-Box 6
+func sb6Inv(r0, r1, r2, r3 *uint32) {
+	v1 := *r1 // save r1
+	v3 := *r3 // save r3
+	t0 := ^(*r0)
+	t1 := *r0 ^ v1
+	t2 := *r2 ^ t1
+	t3 := *r2 | t0
+	t4 := v3 ^ t3
+	*r1 = t2 ^ t4
+	t5 := t2 & t4
+	t6 := t1 ^ t5
+	t7 := v1 | t6
+	*r3 = t4 ^ t7
+	t8 := v1 | *r3
+	*r0 = t6 ^ t8
+	*r2 = (v3 & t0) ^ (t2 ^ t8)
+}
+
+// S-Box 7
+func sb7(r0, r1, r2, r3 *uint32) {
+	t0 := *r1 ^ *r2
+	t1 := *r2 & t0
+	t2 := *r3 ^ t1
+	t3 := *r0 ^ t2
+	t4 := *r3 | t0
+	t5 := t3 & t4
+	*r1 = *r1 ^ t5
+	t6 := t2 | *r1
+	t7 := *r0 & t3
+	*r3 = t0 ^ t7
+	t8 := t3 ^ t6
+	t9 := *r3 & t8
+	*r2 = t2 ^ t9
+	*r0 = (^t8) ^ (*r3 & *r2)
+}
+
+// Inverse S-Box 7
+func sb7Inv(r0, r1, r2, r3 *uint32) {
+	v0 := *r0 // save r0
+	v3 := *r3 // save r3
+	t0 := *r2 | (v0 & *r1)
+	t1 := v3 & (v0 | *r1)
+	*r3 = t0 ^ t1
+	t2 := ^v3
+	t3 := *r1 ^ t1
+	t4 := t3 | (*r3 ^ t2)
+	*r1 = v0 ^ t4
+	*r0 = (*r2 ^ t3) ^ (v3 | *r1)
+	*r2 = (t0 ^ *r1) ^ (*r0 ^ (v0 & *r3))
+}
diff --git a/vendor/github.com/aead/serpent/serpent.go b/vendor/github.com/aead/serpent/serpent.go
new file mode 100644
index 00000000..b3fb811d
--- /dev/null
+++ b/vendor/github.com/aead/serpent/serpent.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// Package serpent implements the Serpent block cipher
+// submitted to the AES challenge. Serpent was designed by
+// Ross Anderson, Eli Biham, and Lars Knudsen.
+// The block cipher takes a 128, 192 or 256 bit key and
+// has a block size of 128 bits.
+package serpent // import "github.com/aead/serpent"
+
+import (
+	"crypto/cipher"
+	"errors"
+)
+
+// BlockSize is the serpent block size in bytes.
+const BlockSize = 16
+
+const phi = 0x9e3779b9 // The Serpent phi constant (sqrt(5) - 1) * 2**31
+
+var errKeySize = errors.New("invalid key size")
+
+// NewCipher returns a new cipher.Block implementing the serpent block cipher.
+// The key argument must be 128, 192 or 256 bit (16, 24, 32 byte).
+func NewCipher(key []byte) (cipher.Block, error) {
+	if k := len(key); k != 16 && k != 24 && k != 32 {
+		return nil, errKeySize
+	}
+	s := &subkeys{}
+	s.keySchedule(key)
+	return s, nil
+}
+
+// The 132 32 bit subkeys of serpent
+type subkeys [132]uint32
+
+func (s *subkeys) BlockSize() int { return BlockSize }
+
+func (s *subkeys) Encrypt(dst, src []byte) {
+	if len(src) < BlockSize {
+		panic("src buffer too small")
+	}
+	if len(dst) < BlockSize {
+		panic("dst buffer too small")
+	}
+	encryptBlock(dst, src, s)
+}
+
+func (s *subkeys) Decrypt(dst, src []byte) {
+	if len(src) < BlockSize {
+		panic("src buffer too small")
+	}
+	if len(dst) < BlockSize {
+		panic("dst buffer too small")
+	}
+	decryptBlock(dst, src, s)
+}
+
+// The key schedule of serpent.
+func (s *subkeys) keySchedule(key []byte) { + var k [16]uint32 + j := 0 + for i := 0; i+4 <= len(key); i += 4 { + k[j] = uint32(key[i]) | uint32(key[i+1])<<8 | uint32(key[i+2])<<16 | uint32(key[i+3])<<24 + j++ + } + if j < 8 { + k[j] = 1 + } + + for i := 8; i < 16; i++ { + x := k[i-8] ^ k[i-5] ^ k[i-3] ^ k[i-1] ^ phi ^ uint32(i-8) + k[i] = (x << 11) | (x >> 21) + s[i-8] = k[i] + } + for i := 8; i < 132; i++ { + x := s[i-8] ^ s[i-5] ^ s[i-3] ^ s[i-1] ^ phi ^ uint32(i) + s[i] = (x << 11) | (x >> 21) + } + + sb3(&s[0], &s[1], &s[2], &s[3]) + sb2(&s[4], &s[5], &s[6], &s[7]) + sb1(&s[8], &s[9], &s[10], &s[11]) + sb0(&s[12], &s[13], &s[14], &s[15]) + sb7(&s[16], &s[17], &s[18], &s[19]) + sb6(&s[20], &s[21], &s[22], &s[23]) + sb5(&s[24], &s[25], &s[26], &s[27]) + sb4(&s[28], &s[29], &s[30], &s[31]) + + sb3(&s[32], &s[33], &s[34], &s[35]) + sb2(&s[36], &s[37], &s[38], &s[39]) + sb1(&s[40], &s[41], &s[42], &s[43]) + sb0(&s[44], &s[45], &s[46], &s[47]) + sb7(&s[48], &s[49], &s[50], &s[51]) + sb6(&s[52], &s[53], &s[54], &s[55]) + sb5(&s[56], &s[57], &s[58], &s[59]) + sb4(&s[60], &s[61], &s[62], &s[63]) + + sb3(&s[64], &s[65], &s[66], &s[67]) + sb2(&s[68], &s[69], &s[70], &s[71]) + sb1(&s[72], &s[73], &s[74], &s[75]) + sb0(&s[76], &s[77], &s[78], &s[79]) + sb7(&s[80], &s[81], &s[82], &s[83]) + sb6(&s[84], &s[85], &s[86], &s[87]) + sb5(&s[88], &s[89], &s[90], &s[91]) + sb4(&s[92], &s[93], &s[94], &s[95]) + + sb3(&s[96], &s[97], &s[98], &s[99]) + sb2(&s[100], &s[101], &s[102], &s[103]) + sb1(&s[104], &s[105], &s[106], &s[107]) + sb0(&s[108], &s[109], &s[110], &s[111]) + sb7(&s[112], &s[113], &s[114], &s[115]) + sb6(&s[116], &s[117], &s[118], &s[119]) + sb5(&s[120], &s[121], &s[122], &s[123]) + sb4(&s[124], &s[125], &s[126], &s[127]) + + sb3(&s[128], &s[129], &s[130], &s[131]) +} diff --git a/vendor/github.com/aead/serpent/serpent_ref.go b/vendor/github.com/aead/serpent/serpent_ref.go new file mode 100644 index 00000000..2d3ff02a --- /dev/null +++ b/vendor/github.com/aead/serpent/serpent_ref.go @@ -0,0 +1,276 @@ +// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package serpent + +// Encrypts one block with the given 132 sub-keys sk. 
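+// Each of the 32 rounds XORs a 128-bit subkey into the state and applies +// one of the eight S-boxes (cycling sb0 through sb7); every round except +// the last is followed by the linear transformation, while the last round +// is instead followed by a whitening XOR with the final subkey words +// sk[128..131].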
+func encryptBlock(dst, src []byte, sk *subkeys) { + // Transform the input block to 4 x 32 bit registers + r0 := uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 + r1 := uint32(src[4]) | uint32(src[5])<<8 | uint32(src[6])<<16 | uint32(src[7])<<24 + r2 := uint32(src[8]) | uint32(src[9])<<8 | uint32(src[10])<<16 | uint32(src[11])<<24 + r3 := uint32(src[12]) | uint32(src[13])<<8 | uint32(src[14])<<16 | uint32(src[15])<<24 + + // Encrypt the block with the 132 sub-keys and 8 S-Boxes + r0, r1, r2, r3 = r0^sk[0], r1^sk[1], r2^sk[2], r3^sk[3] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[4], r1^sk[5], r2^sk[6], r3^sk[7] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[8], r1^sk[9], r2^sk[10], r3^sk[11] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[12], r1^sk[13], r2^sk[14], r3^sk[15] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[16], r1^sk[17], r2^sk[18], r3^sk[19] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[20], r1^sk[21], r2^sk[22], r3^sk[23] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[24], r1^sk[25], r2^sk[26], r3^sk[27] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[28], r1^sk[29], r2^sk[30], r3^sk[31] + sb7(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + + r0, r1, r2, r3 = r0^sk[32], r1^sk[33], r2^sk[34], r3^sk[35] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[36], r1^sk[37], r2^sk[38], r3^sk[39] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[40], r1^sk[41], r2^sk[42], r3^sk[43] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[44], r1^sk[45], r2^sk[46], r3^sk[47] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[48], r1^sk[49], r2^sk[50], r3^sk[51] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[52], r1^sk[53], r2^sk[54], r3^sk[55] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[56], r1^sk[57], r2^sk[58], r3^sk[59] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[60], r1^sk[61], r2^sk[62], r3^sk[63] + sb7(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + + r0, r1, r2, r3 = r0^sk[64], r1^sk[65], r2^sk[66], r3^sk[67] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[68], r1^sk[69], r2^sk[70], r3^sk[71] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[72], r1^sk[73], r2^sk[74], r3^sk[75] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[76], r1^sk[77], r2^sk[78], r3^sk[79] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[80], r1^sk[81], r2^sk[82], r3^sk[83] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[84], r1^sk[85], r2^sk[86], r3^sk[87] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[88], r1^sk[89], r2^sk[90], r3^sk[91] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[92], r1^sk[93], r2^sk[94], r3^sk[95] + sb7(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + + r0, r1, r2, r3 = r0^sk[96], r1^sk[97], r2^sk[98], r3^sk[99] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[100], r1^sk[101], r2^sk[102], r3^sk[103] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) 
+ r0, r1, r2, r3 = r0^sk[104], r1^sk[105], r2^sk[106], r3^sk[107] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[108], r1^sk[109], r2^sk[110], r3^sk[111] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[112], r1^sk[113], r2^sk[114], r3^sk[115] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[116], r1^sk[117], r2^sk[118], r3^sk[119] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[120], r1^sk[121], r2^sk[122], r3^sk[123] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[124], r1^sk[125], r2^sk[126], r3^sk[127] + sb7(&r0, &r1, &r2, &r3) + + // whitening + r0 ^= sk[128] + r1 ^= sk[129] + r2 ^= sk[130] + r3 ^= sk[131] + + // write the encrypted block to the output + + dst[0] = byte(r0) + dst[1] = byte(r0 >> 8) + dst[2] = byte(r0 >> 16) + dst[3] = byte(r0 >> 24) + dst[4] = byte(r1) + dst[5] = byte(r1 >> 8) + dst[6] = byte(r1 >> 16) + dst[7] = byte(r1 >> 24) + dst[8] = byte(r2) + dst[9] = byte(r2 >> 8) + dst[10] = byte(r2 >> 16) + dst[11] = byte(r2 >> 24) + dst[12] = byte(r3) + dst[13] = byte(r3 >> 8) + dst[14] = byte(r3 >> 16) + dst[15] = byte(r3 >> 24) +} + +// Decrypts one block with the given 132 sub-keys sk. +func decryptBlock(dst, src []byte, sk *subkeys) { + // Transform the input block to 4 x 32 bit registers + r0 := uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 + r1 := uint32(src[4]) | uint32(src[5])<<8 | uint32(src[6])<<16 | uint32(src[7])<<24 + r2 := uint32(src[8]) | uint32(src[9])<<8 | uint32(src[10])<<16 | uint32(src[11])<<24 + r3 := uint32(src[12]) | uint32(src[13])<<8 | uint32(src[14])<<16 | uint32(src[15])<<24 + + // undo whitening + r0 ^= sk[128] + r1 ^= sk[129] + r2 ^= sk[130] + r3 ^= sk[131] + + // Decrypt the block with the 132 sub-keys and 8 S-Boxes + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[124], r1^sk[125], r2^sk[126], r3^sk[127] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[120], r1^sk[121], r2^sk[122], r3^sk[123] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[116], r1^sk[117], r2^sk[118], r3^sk[119] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[112], r1^sk[113], r2^sk[114], r3^sk[115] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[108], r1^sk[109], r2^sk[110], r3^sk[111] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[104], r1^sk[105], r2^sk[106], r3^sk[107] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[100], r1^sk[101], r2^sk[102], r3^sk[103] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[96], r1^sk[97], r2^sk[98], r3^sk[99] + linearInv(&r0, &r1, &r2, &r3) + + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[92], r1^sk[93], r2^sk[94], r3^sk[95] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[88], r1^sk[89], r2^sk[90], r3^sk[91] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[84], r1^sk[85], r2^sk[86], r3^sk[87] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[80], r1^sk[81], r2^sk[82], r3^sk[83] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[76], r1^sk[77], r2^sk[78], r3^sk[79] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 
= r0^sk[72], r1^sk[73], r2^sk[74], r3^sk[75] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[68], r1^sk[69], r2^sk[70], r3^sk[71] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[64], r1^sk[65], r2^sk[66], r3^sk[67] + linearInv(&r0, &r1, &r2, &r3) + + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[60], r1^sk[61], r2^sk[62], r3^sk[63] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[56], r1^sk[57], r2^sk[58], r3^sk[59] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[52], r1^sk[53], r2^sk[54], r3^sk[55] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[48], r1^sk[49], r2^sk[50], r3^sk[51] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[44], r1^sk[45], r2^sk[46], r3^sk[47] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[40], r1^sk[41], r2^sk[42], r3^sk[43] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[36], r1^sk[37], r2^sk[38], r3^sk[39] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[32], r1^sk[33], r2^sk[34], r3^sk[35] + linearInv(&r0, &r1, &r2, &r3) + + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[28], r1^sk[29], r2^sk[30], r3^sk[31] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[24], r1^sk[25], r2^sk[26], r3^sk[27] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[20], r1^sk[21], r2^sk[22], r3^sk[23] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[16], r1^sk[17], r2^sk[18], r3^sk[19] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[12], r1^sk[13], r2^sk[14], r3^sk[15] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[8], r1^sk[9], r2^sk[10], r3^sk[11] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[4], r1^sk[5], r2^sk[6], r3^sk[7] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + + r0 ^= sk[0] + r1 ^= sk[1] + r2 ^= sk[2] + r3 ^= sk[3] + + // write the decrypted block to the output + dst[0] = byte(r0) + dst[1] = byte(r0 >> 8) + dst[2] = byte(r0 >> 16) + dst[3] = byte(r0 >> 24) + dst[4] = byte(r1) + dst[5] = byte(r1 >> 8) + dst[6] = byte(r1 >> 16) + dst[7] = byte(r1 >> 24) + dst[8] = byte(r2) + dst[9] = byte(r2 >> 8) + dst[10] = byte(r2 >> 16) + dst[11] = byte(r2 >> 24) + dst[12] = byte(r3) + dst[13] = byte(r3 >> 8) + dst[14] = byte(r3 >> 16) + dst[15] = byte(r3 >> 24) +} diff --git a/vendor/github.com/containerd/typeurl/v2/.gitignore b/vendor/github.com/containerd/typeurl/v2/.gitignore new file mode 100644 index 00000000..d5384677 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/.gitignore @@ -0,0 +1,2 @@ +*.test +coverage.txt diff --git a/vendor/github.com/containerd/typeurl/v2/LICENSE b/vendor/github.com/containerd/typeurl/v2/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md new file mode 100644 index 00000000..e3d0742f --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/README.md @@ -0,0 +1,20 @@ +# typeurl + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) +[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) +[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) +[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) + +A Go package for managing the registration, marshaling, and unmarshaling of encoded types. + +This package helps when types are sent over a ttrpc/GRPC API and marshaled as a protobuf [Any](https://pkg.go.dev/google.golang.org/protobuf@v1.27.1/types/known/anypb#Any) + +## Project details + +**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/v2/doc.go b/vendor/github.com/containerd/typeurl/v2/doc.go new file mode 100644 index 00000000..c0d0fd20 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/doc.go @@ -0,0 +1,83 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package typeurl + +// Package typeurl assists with managing the registration, marshaling, and +// unmarshaling of types encoded as protobuf.Any. +// +// A protobuf.Any is a proto message that can contain any arbitrary data. It +// consists of two components, a TypeUrl and a Value, and its proto definition +// looks like this: +// +// message Any { +// string type_url = 1; +// bytes value = 2; +// } +// +// The TypeUrl is used to distinguish the contents from other proto.Any +// messages. This typeurl library manages these URLs to enable automagic +// marshaling and unmarshaling of the contents. +// +// For example, consider this go struct: +// +// type Foo struct { +// Field1 string +// Field2 string +// } +// +// To use typeurl, types must first be registered. This is typically done in +// the init function +// +// func init() { +// typeurl.Register(&Foo{}, "Foo") +// } +// +// This will register the type Foo with the url path "Foo". The arguments to +// Register are variadic, and are used to construct a url path. 
Consider this +// example, from the github.com/containerd/containerd/client package: + +// func init() { +// const prefix = "types.containerd.io" +// // register TypeUrls for commonly marshaled external types +// major := strconv.Itoa(specs.VersionMajor) +// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") +// // this function has more Register calls, which are elided. +// } +// +// This registers several types under a more complex url, which ends up mapping +// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other +// value for major). +// +// Once a type is registered, it can be marshaled to a proto.Any message simply +// by calling `MarshalAny`, like this: +// +// foo := &Foo{Field1: "value1", Field2: "value2"} +// anyFoo, err := typeurl.MarshalAny(foo) +// +// MarshalAny will resolve the correct URL for the type. If the type in +// question implements the proto.Message interface, then it will be marshaled +// as a proto message. Otherwise, it will be marshaled as json. This means that +// typeurl will work on any arbitrary data, whether or not it has a proto +// definition, as long as it can be serialized to json. +// +// To unmarshal, the process is simply inverse: +// +// iface, err := typeurl.UnmarshalAny(anyFoo) +// foo := iface.(*Foo) +// +// The correct type is automatically chosen from the type registry, and the +// returned interface can be cast straight to that type. diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go new file mode 100644 index 00000000..8d6665bb --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/types.go @@ -0,0 +1,269 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package typeurl + +import ( + "encoding/json" + "errors" + "fmt" + "path" + "reflect" + "sync" + + gogoproto "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoregistry" +) + +var ( + mu sync.RWMutex + registry = make(map[reflect.Type]string) +) + +// Definitions of common error types used throughout typeurl. +// +// These error types are used with errors.Wrap and errors.Wrapf to add context +// to an error. +// +// To detect an error class, use the errors.Is() function to tell whether an +// error is of this type. + +var ( + ErrNotFound = errors.New("not found") +) + +// Any contains an arbitrary protocol buffer message along with its type. +// +// While there is google.golang.org/protobuf/types/known/anypb.Any, +// we'd like to have our own to hide the underlying protocol buffer +// implementations from containerd clients. +// +// https://developers.google.com/protocol-buffers/docs/proto3#any +type Any interface { + // GetTypeUrl returns a URL/resource name that uniquely identifies + // the type of the serialized protocol buffer message. + GetTypeUrl() string + + // GetValue returns a valid serialized protocol buffer of the type that + // GetTypeUrl() indicates.
+ GetValue() []byte +} + +type anyType struct { + typeURL string + value []byte +} + +func (a *anyType) GetTypeUrl() string { + if a == nil { + return "" + } + return a.typeURL +} + +func (a *anyType) GetValue() []byte { + if a == nil { + return nil + } + return a.value +} + +// Register a type with a base URL for JSON marshaling. When the MarshalAny and +// UnmarshalAny functions are called, they will treat the Any type value as JSON. +// To use protocol buffers for handling the Any value, the proto.Register +// function should be used instead of this function. +func Register(v interface{}, args ...string) { + var ( + t = tryDereference(v) + p = path.Join(args...) + ) + mu.Lock() + defer mu.Unlock() + if et, ok := registry[t]; ok { + if et != p { + panic(fmt.Errorf("type registered with alternate path %q != %q", et, p)) + } + return + } + registry[t] = p +} + +// TypeURL returns the type url for a registered type. +func TypeURL(v interface{}) (string, error) { + mu.RLock() + u, ok := registry[tryDereference(v)] + mu.RUnlock() + if !ok { + switch t := v.(type) { + case proto.Message: + return string(t.ProtoReflect().Descriptor().FullName()), nil + case gogoproto.Message: + return gogoproto.MessageName(t), nil + default: + return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound) + } + } + return u, nil +} + +// Is returns true if the type of the Any is the same as v. +func Is(any Any, v interface{}) bool { + // call to check that v is a pointer + tryDereference(v) + url, err := TypeURL(v) + if err != nil { + return false + } + return any.GetTypeUrl() == url +} + +// MarshalAny marshals the value v into an any with the correct TypeUrl. +// If the provided object is already a proto.Any message, then it will be +// returned verbatim. If it is of type proto.Message, it will be marshaled as a +// protocol buffer. Otherwise, the object will be marshaled to json. +func MarshalAny(v interface{}) (Any, error) { + var marshal func(v interface{}) ([]byte, error) + switch t := v.(type) { + case Any: + // avoid reserializing the type if we have an any. + return t, nil + case proto.Message: + marshal = func(v interface{}) ([]byte, error) { + return proto.Marshal(t) + } + case gogoproto.Message: + marshal = func(v interface{}) ([]byte, error) { + return gogoproto.Marshal(t) + } + default: + marshal = json.Marshal + } + + url, err := TypeURL(v) + if err != nil { + return nil, err + } + + data, err := marshal(v) + if err != nil { + return nil, err + } + return &anyType{ + typeURL: url, + value: data, + }, nil +} + +// UnmarshalAny unmarshals the any type into a concrete type. +func UnmarshalAny(any Any) (interface{}, error) { + return UnmarshalByTypeURL(any.GetTypeUrl(), any.GetValue()) +} + +// UnmarshalByTypeURL unmarshals the given type and value into a concrete type. +func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { + return unmarshal(typeURL, value, nil) +} + +// UnmarshalTo unmarshals the any type into a concrete type passed in the out +// argument. It is identical to UnmarshalAny, but lets clients provide a +// destination type through the out argument. +func UnmarshalTo(any Any, out interface{}) error { + return UnmarshalToByTypeURL(any.GetTypeUrl(), any.GetValue(), out) +} + +// UnmarshalToByTypeURL unmarshals the given type and value into a concrete type passed +// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients +// provide a destination type through the out argument.
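+// Values whose types were registered with Register are decoded as JSON, +// while types resolved through the gogo or google.golang.org protobuf +// registries are decoded as protocol buffer messages.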
+func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { + _, err := unmarshal(typeURL, value, out) + return err +} + +func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { + t, err := getTypeByUrl(typeURL) + if err != nil { + return nil, err + } + + if v == nil { + v = reflect.New(t.t).Interface() + } else { + // Validate interface type provided by client + vURL, err := TypeURL(v) + if err != nil { + return nil, err + } + if typeURL != vURL { + return nil, fmt.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) + } + } + + if t.isProto { + switch t := v.(type) { + case proto.Message: + err = proto.Unmarshal(value, t) + case gogoproto.Message: + err = gogoproto.Unmarshal(value, t) + } + } else { + err = json.Unmarshal(value, v) + } + + return v, err +} + +type urlType struct { + t reflect.Type + isProto bool +} + +func getTypeByUrl(url string) (urlType, error) { + mu.RLock() + for t, u := range registry { + if u == url { + mu.RUnlock() + return urlType{ + t: t, + }, nil + } + } + mu.RUnlock() + // fallback to proto registry + t := gogoproto.MessageType(url) + if t != nil { + return urlType{ + // get the underlying Elem because proto returns a pointer to the type + t: t.Elem(), + isProto: true, + }, nil + } + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound) + } + empty := mt.New().Interface() + return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil +} + +func tryDereference(v interface{}) reflect.Type { + t := reflect.TypeOf(v) + if t.Kind() == reflect.Ptr { + // require check of pointer but dereference to register + return t.Elem() + } + panic("v is not a pointer to a type") +} diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index e62030ff..ac12d66b 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -27,12 +27,12 @@ env: #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-37" - PRIOR_FEDORA_NAME: "fedora-36" - DEBIAN_NAME: "debian-12" + FEDORA_NAME: "fedora-39β" + PRIOR_FEDORA_NAME: "fedora-38" + DEBIAN_NAME: "debian-13" # Image identifiers - IMAGE_SUFFIX: "c20230405t152256z-f37f36d12" + IMAGE_SUFFIX: "c20231004t194547z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -83,9 +83,9 @@ meta_task: GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14] GCPNAME: ENCRYPTED[8509e6a681b859479ce6aa275bd3c4ac82de5beec6df6057925afc4cd85b7ef2e879066ae8baaa2d453b82958e434578] GCPPROJECT: ENCRYPTED[cc09b62d0ec6746a3df685e663ad25d9d5af95ef5fd843c96f3d0ec9d7f065dc63216b9c685c9f43a776a1d403991494] - CIRRUS_CLONE_DEPTH: 1 # source not used - script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}' + clone_script: 'true' + script: '/usr/local/bin/entrypoint.sh' smoke_task: @@ -112,7 +112,6 @@ smoke_task: vendor_task: name: "Test Vendoring" alias: vendor - only_if: &not_multiarch $CIRRUS_CRON != 'multiarch' env: CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah" @@ -121,7 +120,7 @@ vendor_task: # Runs within Cirrus's "community cluster" container: - image: docker.io/library/golang:1.18 + image: docker.io/library/golang:latest cpu: 1 memory: 1 @@ -137,8 +136,7 @@
cross_build_task: name: "Cross Compile" alias: cross_build only_if: >- - $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && - $CIRRUS_CRON != 'multiarch' + $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' osx_instance: image: ghcr.io/cirruslabs/macos-ventura-base:latest @@ -160,8 +158,7 @@ unit_task: alias: unit only_if: &not_build_docs >- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && - $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && - $CIRRUS_CRON != 'multiarch' + $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' depends_on: &smoke_vendor_cross - smoke - vendor @@ -192,7 +189,7 @@ conformance_task: gce_instance: image_name: "${DEBIAN_CACHE_IMAGE_NAME}" - timeout_in: 25m + timeout_in: 65m matrix: - env: @@ -322,52 +319,6 @@ in_podman_task: <<: *standardlogs -image_build_task: &image-build - name: "Build multi-arch $FLAVOR" - alias: image_build - # Some of these container images take > 1h to build, limit - # this task to a specific Cirrus-Cron entry with this name. - only_if: $CIRRUS_CRON == 'multiarch' - depends_on: - - smoke - timeout_in: 120m # emulation is sssllllooooowwww - gce_instance: - <<: *standardvm - image_name: build-push-${IMAGE_SUFFIX} - # More muscle required for parallel multi-arch build - type: "n2-standard-4" - matrix: - - env: - FLAVOR: upstream - - env: - FLAVOR: testing - - env: - FLAVOR: stable - env: - DISTRO_NV: "${FEDORA_NAME}" # Required for repo cache extraction - BUILDAH_USERNAME: ENCRYPTED[70e1d4f026cba5d82fc067944baab10f7c71c64bb6b75fce4eeb5c106694b3bbc8e08f8a1b926d6e03e85cf4e21833bb] - BUILDAH_PASSWORD: ENCRYPTED[2dc7f4f623bfc856e1d5030df263b9e48ddab39abacea7a8bc714179c188df15fc0a5bb5d3414a24637d4e39aa51b7b5] - CONTAINERS_USERNAME: ENCRYPTED[88cd93c753f78d70e4beb5dbebd4402d682daf45793d7e0fe8b75b358f768e8734aef3f130ffb4ebca9bdea8d220a188] - CONTAINERS_PASSWORD: ENCRYPTED[886cf4cc126e50b2fd7f2792235a22bb79e4b81db43f803a6214a38d3fd6c04cd4e64570b562cb32b04e5fbc435404b6] - main_script: - - source /etc/automation_environment - - main.sh $CIRRUS_REPO_CLONE_URL contrib/buildahimage $FLAVOR - - -test_image_build_task: - <<: *image-build - alias: test_image_build - # Allow this to run inside a PR w/ [CI:BUILD] only. - only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*' - # This takes a LONG time, only run when requested. N/B: Any task - # made to depend on this one will block FOREVER unless triggered. - # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`. - trigger_type: manual - # Overwrite all 'env', don't push anything, just do the build. - env: - DRYRUN: 1 - - # Status aggregator for all tests. This task simply ensures a defined # set of tasks all passed, and allows confirming that based on the status # of this task.
@@ -384,7 +335,6 @@ success_task: - cross_build - integration - in_podman - - image_build container: image: "quay.io/libpod/alpine:latest" diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore index 939ce6ef..e8dee79c 100644 --- a/vendor/github.com/containers/buildah/.gitignore +++ b/vendor/github.com/containers/buildah/.gitignore @@ -10,3 +10,5 @@ Dockerfile* !/tests/conformance/**/Dockerfile* *.swp /result/ +internal/mkcw/embed/entrypoint.o +internal/mkcw/embed/entrypoint diff --git a/vendor/github.com/containers/buildah/.packit.sh b/vendor/github.com/containers/buildah/.packit.sh deleted file mode 100644 index c5d2e676..00000000 --- a/vendor/github.com/containers/buildah/.packit.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# This script handles any custom processing of the spec file generated using the `post-upstream-clone` -# action and gets used by the fix-spec-file action in .packit.yaml. - -set -eo pipefail - -# Get Version from define/types.go in HEAD -VERSION=$(grep ^$'\tVersion' define/types.go | cut -d\" -f2 | sed -e 's/-/~/') - -# Generate source tarball from HEAD -git archive --prefix=buildah-$VERSION/ -o buildah-$VERSION.tar.gz HEAD - -# RPM Spec modifications - -# Use the Version from define/types.go in rpm spec -sed -i "s/^Version:.*/Version: $VERSION/" buildah.spec - -# Use Packit's supplied variable in the Release field in rpm spec. -# buildah.spec is generated using `rpkg spec --outdir ./` as mentioned in the -# `post-upstream-clone` action in .packit.yaml. -sed -i "s/^Release:.*/Release: $PACKIT_RPMSPEC_RELEASE%{?dist}/" buildah.spec - -# Use above generated tarball as Source in rpm spec -sed -i "s/^Source:.*.tar.gz/Source: buildah-$VERSION.tar.gz/" buildah.spec - -# Use the right build dir for autosetup stage in rpm spec -sed -i "s/^%setup.*/%autosetup -Sgit -n %{name}-$VERSION/" buildah.spec diff --git a/vendor/github.com/containers/buildah/.packit.yaml b/vendor/github.com/containers/buildah/.packit.yaml index ab262a00..76b297c1 100644 --- a/vendor/github.com/containers/buildah/.packit.yaml +++ b/vendor/github.com/containers/buildah/.packit.yaml @@ -2,29 +2,53 @@ # See the documentation for more information: # https://packit.dev/docs/configuration/ -# Build targets can be found at: -# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/packit-builds/ +specfile_path: rpm/buildah.spec +upstream_tag_template: v{version} -specfile_path: buildah.spec +srpm_build_deps: + - make jobs: - - &copr - job: copr_build + - job: copr_build trigger: pull_request - owner: rhcontainerbot - project: packit-builds + notifications: + failure_comment: + message: "Ephemeral COPR build failed. @containers/packit-build please check." enable_net: true - srpm_build_deps: - - make - - rpkg - actions: - post-upstream-clone: - - "rpkg spec --outdir ./" - fix-spec-file: - - "bash .packit.sh" + targets: + - fedora-all-x86_64 + - fedora-all-aarch64 + - fedora-eln-x86_64 + - fedora-eln-aarch64 + - centos-stream+epel-next-8-x86_64 + - centos-stream+epel-next-8-aarch64 + - centos-stream+epel-next-9-x86_64 + - centos-stream+epel-next-9-aarch64 + additional_repos: + - "copr://rhcontainerbot/podman-next" - - <<: *copr - # Run on commit to main branch + # Run on commit to main branch + - job: copr_build trigger: commit - branch: main + notifications: + failure_comment: + message: "podman-next COPR build failed. @containers/packit-build please check." 
+ owner: rhcontainerbot project: podman-next + enable_net: true + + - job: propose_downstream + trigger: release + update_release: false + dist_git_branches: + - fedora-all + + - job: koji_build + trigger: commit + dist_git_branches: + - fedora-all + + - job: bodhi_update + trigger: commit + dist_git_branches: + - fedora-branched # rawhide updates are created automatically diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index e69f7782..c46eb3ed 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,292 @@ # Changelog +## v1.33.7 (2024-03-18) + + CVE-2024-1753 container escape fix + tests: skip_if_no_unshare(): check for --setuid + +## v1.33.6 (2024-02-14) + + [release-1.33] Bump golang.org/x/crypto v0.17.0 + +## v1.33.5 (2024-02-01) + + Bump c/common to v0.57.4, moby/buildkit v0.5.12 + +## v1.33.4 (2024-01-30) + + Bump c/image to v5.29.2 and c/common to v0.57.3 + +## v1.33.3 (2024-01-18) + + Bump c/common to 0.57.2 and c/image to 5.29.1 + +## v1.33.2 (2023-11-22) + + Update minimum to golang 1.20 + fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.0 + fix(deps): update module github.com/moby/buildkit to v0.12.3 + Bump to v1.33.2-dev + +## v1.33.1 (2023-11-18) + + fix(deps): update module github.com/moby/buildkit to v0.11.4 [security] + test,heredoc: use fedora instead of docker.io/library/python:latest + Bump to v1.33.1-dev + +## v1.33.0 (2023-11-17) + + Never omit layers for emptyLayer instructions when squashing/cwing + Add OverrideChanges and OverrideConfig to CommitOptions + buildah: add heredoc support for RUN, COPY and ADD + vendor: bump imagebuilder to v1.2.6-0.20231110114814-35a50d57f722 + conformance tests: archive the context directory as 0:0 (#5171) + blobcacheinfo,test: blobs must be resued when pushing across registry + Bump c/storage v1.51.0, c/image v5.29.0, c/common v0.57.0 + pkg/util.MirrorToTempFileIfPathIsDescriptor(): don't leak an fd + StageExecutor.Execute: force a commit for --unsetenv, too + Increase a copier+chroot test timeout + Add support for --compat-auth-file in login/logout + Update existing tests for error message change + Update c/image and c/common to latest + fix(deps): update module github.com/containerd/containerd to v1.7.9 + build: downgrade to go 1.20 + Add godoc for pkg/parse.GetTempDir + conformance tests: use go-dockerclient for BuildKit builds + Make TEE types case-insensitive + fix(deps): update module golang.org/x/crypto to v0.15.0 + Tweak some help descriptions + Stop using DefaultNetworkSysctl and use containers.conf only + Implement ADD checksum flag #5135 + vendor of openshift/imagebuilder #5135 + Pass secrets from the host down to internal podman containers + Update cirrus and version of golang + image: replace GetStoreImage with ResolveReference + vendor: bump c/image to 373c52a9466f + pkg/parse.Platform(): minor simplification + createConfigsAndManifests: clear history before cw-specific logic + Use a constant definition instead of "scratch" + conformance: use require.NoErrorf() more + fix(deps): update module golang.org/x/term to v0.14.0 + fix(deps): update module golang.org/x/sync to v0.5.0 + fix(deps): update module github.com/spf13/cobra to v1.8.0 + fix(deps): update module golang.org/x/sys to v0.14.0 + fix(deps): update github.com/containers/common digest to 8354404 + fix(deps): update module github.com/opencontainers/runc to v1.1.10 + fix(deps): update 
github.com/containers/luksy digest to b5a7f79 + Log the platform for build errors during multi-platform builds + Use mask definitions from containers/common + Vendor in latest containers/common + fix(deps): update module github.com/containerd/containerd to v1.7.8 + fix(deps): update module go.etcd.io/bbolt to v1.3.8 + container.conf: support attributed string slices + fix(deps): update module sigs.k8s.io/yaml to v1.4.0 + Use cutil.StringInSlice rather then contains + Add --no-hostname option to buildah containers + vendor c/common: appendable containers.conf strings, Part 1 + fix(deps): update module github.com/onsi/gomega to v1.28.1 + chroot.setupChrootBindMounts: pay more attention to flags + chore(deps): update dependency containers/automation_images to v20231004 + Vendor containers/common + chore(deps): update module golang.org/x/net to v0.17.0 [security] + run: use internal.GetTempDir with os.MkdirTemp + fix(deps): update module github.com/containerd/containerd to v1.7.7 + imagebuildah,multi-stage: do not remove base images + gitignore: add mkcw binary + mkcw: remove entrypoint binaries + fix(deps): update module golang.org/x/crypto to v0.14.0 + fix(deps): update module golang.org/x/sys to v0.13.0 + fix(deps): update module golang.org/x/sync to v0.4.0 + Update some comments related to confidential workload + Use the parent's image ID in the config that we pass to imagebuilder + fix(deps): update github.com/containers/common digest to 8892536 + fix(deps): update github.com/containers/luksy digest to 6df88cb + bug: Ensure the mount type is always BindMount by default + Protocol can be specified with --port. Ex. --port 514/udp + fix(deps): update module github.com/onsi/gomega to v1.28.0 + build,config: add support for --unsetlabel + tests/bud: add tests + [CI:BUILD] Packit: tag @containers/packit-build on copr build failures + stage_executor: allow images without layers + vendor of containers/common + Removing selinux_tag.sh as no longer needed after 580356f [NO NEW TESTS NEEDED] + add/copy: make sure we handle relative path names correctly + fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc5 + Bump to v1.33.0-dev + imagebuildah: consider ignorefile with --build-context + +## v1.32.0 (2023-09-14) + + GetTmpDir is not using ImageCopyTmpdir correctly + Run codespell on code + Bump vendor containers/(common, storage, image) + Cirrus: Remove multi-arch buildah image builds + fix(deps): update module github.com/containerd/containerd to v1.7.6 + Split GetTempDir from internal/util + Move most of internal/parse to internal/volumes + copier: remove libimage dependency via util package + Add some docs for `build --cw`, `commit --cw`, and `mkcw` + Add `buildah mkcw`, add `--cw` to `buildah commit` and `buildah build` + Make sure that pathnames picked up from the environment are absolute + fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.4 + fix(deps): update module github.com/docker/docker to v24.0.6+incompatible + Don't try to look up names when committing images + fix(deps): update module golang.org/x/crypto to v0.13.0 + docs: use valid github repo + fix(deps): update module golang.org/x/sys to v0.12.0 + vendor containers/common@12405381ff45 + push: --force-compression should be true with --compression-format + Update module github.com/containerd/containerd to v1.7.5 + [skip-ci] Update tim-actions/commit-message-checker-with-regex action to v0.3.2 + docs: add reference to oci-hooks + Support passing of ULimits as -1 to mean max + GHA: Attempt to fix 
discussion_lock workflow + Fixing the owner of the storage.conf. + pkg/chrootuser: Ignore comments when parsing /etc/group on FreeBSD + Use buildah repo rather then podman repo + GHA: Closed issue/PR comment-lock test + fix(deps): update module github.com/containers/storage to v1.49.0 + chore(deps): update dependency containers/automation_images to v20230816 + Replace troff code with markdown in buildah-{copy,add}.1.md + [CI:BUILD] rpm: spdx compatible license field + executor: build-arg warnings must honor global args + fix(deps): update module github.com/containers/ocicrypt to v1.1.8 + chroot: `setSeccomp` add support for `ArchPARISC(64)` and `ArchRISCV64` + make,cross: restore loong64 + Clear CommonBuildOpts when loading Builder status + buildah/push/manifest-push: add support for --force-compression + vendor: bump c/common to v0.55.1-0.20230811093040-524b4d5c12f9 + chore(deps): update dependency containers/automation_images to v20230809 + [CI:BUILD] RPM: fix buildtags + fix(deps): update module github.com/opencontainers/runc to v1.1.9 + chore(deps): update dependency ubuntu to v22 + chore(deps): update dependency containers/automation_images to v20230807 + [CI:BUILD] Packit: add fedora-eln targets + [CI:BUILD] RPM: build docs with vendored go-md2man + packit: Build PRs into default packit COPRs + Update install.md + Update install.md changes current Debian stable version name + fix(deps): update module golang.org/x/term to v0.11.0 + fix(deps): update module golang.org/x/crypto to v0.12.0 + tests: fix layer-label tests + buildah: add --layer-label for setting labels on layers + Cirrus: container/rootless env. var. passthrough + Cirrus: Remove duplicate env. var. definitions + fix(deps): update github.com/containers/storage digest to c3da76f + Add a missing .Close() call on an ImageSource + Create only a reference when that's all we need + Add a missing .Close() call on an ImageDestination + CI:BUILD] RPM: define gobuild macro for rhel/centos stream + manifest/push: add support for --add-compression + manifest/inspect: add support for tls-verify and authfile + vendor: bump c/common to v0.55.1-0.20230727095721-647ed1d4d79a + vendor: bump c/image to v5.26.1-0.20230726142307-8c387a14f4ac + fix(deps): update module github.com/containerd/containerd to v1.7.3 + fix(deps): update module github.com/onsi/gomega to v1.27.10 + fix(deps): update module github.com/docker/docker to v24.0.5+incompatible + fix(deps): update module github.com/containers/image/v5 to v5.26.1 + fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0 + Update vendor of containers/(storage,image,common) + fix(deps): update module github.com/opencontainers/runc to v1.1.8 + [CI:BUILD] Packit: remove pre-sync action + fix(deps): update module github.com/containers/common to v0.55.2 + [CI:BUILD] Packit: downstream task script needs GOPATH + Vendor in containers/(common, image, storage) + fix(deps): update module golang.org/x/term to v0.10.0 + [CI:BUILD] Packit: fix pre-sync action for downstream tasks + contrib/buildahimage: set config correctly for rootless build user + fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc4 + Bump to v1.32.0-dev + Update debian install instructions + pkg/overlay: add limited support for FreeBSD + +## v1.31.0 (2023-06-30) + + Bump c/common to 0.55.1 and c/image to 5.26.1 + Bump c/image to 5.26.0 and c/common to 0.54.0 + vendor: update c/{common,image,storage} to latest + chore: pkg imported more than once + buildah: add pasta(1) support + use slirp4netns 
package from c/common + update c/common to latest + add hostname to /etc/hosts when running with host network + vendor: update c/common to latest + [CI:BUILD] Packit: add jobs for downstream Fedora package builds + fix(deps): update module golang.org/x/sync to v0.3.0 + fix(deps): update module golang.org/x/crypto to v0.10.0 + Add smoke tests for encryption CLI helpers + fix(deps): update module golang.org/x/term to v0.9.0 + fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.3 + Remove device mapper support + Remove use of deprecated tar.TypeRegA + Update tooling to support newer golangci-lint + Make cli.EncryptConfig,DecryptConfig, GetFormat public + Don't decrypt images by default + fix(deps): update module github.com/onsi/gomega to v1.27.8 + fix(deps): update github.com/containers/storage digest to 3f3fb2f + Renovate: Don't touch fragile test stuffs + [CI:DOCS] Update comment to remove ambiguity + fix(deps): update github.com/containers/image/v5 digest to abe5133 + fix(deps): update module github.com/sirupsen/logrus to v1.9.3 + fix(deps): update module github.com/containerd/containerd to v1.7.2 + Explicitly ref. quay images for CI + At startup, log the effective capabilities for debugging + parse: use GetTempDir from internal utils + GetTmpDir: honor image_copy_tmp_dir from containers.conf + docs/Makefile: don't show sed invocations + CI: Support testing w/ podman-next COPR packages + intermediate-images inherit-label test: make it debuggable + fix(deps): update github.com/containers/common digest to 462ccdd + Add a warning to `--secret` docs + vendor: bump c/storage to v1.46.2-0.20230526114421-55ee2d19292f + executor: apply label to only final stage + remove registry.centos.org + Go back to setting SysProcAttr.Pdeathsig for child processes + Fix auth.json path (validated on Fedora 38) wq Signed-off-by: Andreas Mack + fix(deps): update module github.com/stretchr/testify to v1.8.3 + CI: fix test broken by renovatebot + chore(deps): update quay.io/libpod/testimage docker tag to v20221018 + fix(deps): update module github.com/onsi/gomega to v1.27.7 + test: use debian instead of docker.io/library/debian:testing-slim + vendor: bump logrus to 1.9.2 + [skip-ci] Update tim-actions/get-pr-commits action to v1.3.0 + Revert "Proof of concept: nightly dependency treadmill" + fix(deps): update module github.com/sirupsen/logrus to v1.9.1 + vendor in containers/(common,storage,image) + fix(deps): update module github.com/docker/distribution to v2.8.2+incompatible + run: drop Pdeathsig + chroot: lock thread before setPdeathsig + tests: add a case for required=false + fix(deps): update module github.com/openshift/imagebuilder to v1.2.5 + build: validate volumes on backend + secret: accept required flag w/o value + fix(deps): update module github.com/containerd/containerd to v1.7.1 + fix(deps): update module golang.org/x/crypto to v0.9.0 + Update the demos README file to fix minor typos + fix(deps): update module golang.org/x/sync to v0.2.0 + fix(deps): update module golang.org/x/term to v0.8.0 + manifest, push: use source as destination if not specified + run,mount: remove path only if they didnt pre-exist + Cirrus: Fix meta task failing to find commit + parse: filter edge-case for podman-remote + fix(deps): update module github.com/opencontainers/runc to v1.1.7 + fix(deps): update module github.com/docker/docker to v23.0.5+incompatible + build: --platform must accept only arch + fix(deps): update module github.com/containers/common to v0.53.0 + makefile: increase conformance 
timeout + Cap suffixDigitsModulo to a 9-digits suffix. + Rename conflict to suffixDigitsModulo + fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.2 + fix(deps): update module github.com/opencontainers/runc to v1.1.6 + chore(deps): update centos docker tag to v8 + Clarify the need for qemu-user-static package + chore(deps): update quay.io/centos/centos docker tag to v8 + Renovate: Ensure test/tools/go.mod is managed + Revert "buildah image should not enable fuse-overlayfs for rootful mode" + Bump to v1.31.0-dev + parse: add support for relabel bind mount option + ## v1.30.0 (2023-04-06) fix(deps): update module github.com/opencontainers/runc to v1.1.5 diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 508c95c2..85b43c7b 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -1,7 +1,7 @@ export GOPROXY=https://proxy.golang.org APPARMORTAG := $(shell hack/apparmor_tag.sh) -STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +STORAGETAGS := exclude_graphdriver_devicemapper $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) BUILDTAGS += $(TAGS) @@ -27,7 +27,7 @@ RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true) GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO}) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) -STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" +STATIC_STORAGETAGS = "containers_image_openpgp $(STORAGE_TAGS)" # we get GNU make 3.x in MacOS build envs, which wants # to be escaped in # strings, while the 4.x we have on Linux doesn't. 
this is the documented @@ -39,7 +39,7 @@ LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= @@ -69,14 +69,26 @@ static: mkdir -p ./bin cp -rfp ./result/bin/* ./bin/ -bin/buildah: $(SOURCES) cmd/buildah/*.go +bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint.gz $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah +ifneq ($(shell as --version | grep x86_64),) +internal/mkcw/embed/entrypoint: internal/mkcw/embed/entrypoint.s + $(AS) -o $(patsubst %.s,%.o,$^) $^ + $(LD) -o $@ $(patsubst %.s,%.o,$^) + strip $@ +else +.PHONY: internal/mkcw/embed/entrypoint +endif + +internal/mkcw/embed/entrypoint.gz: internal/mkcw/embed/entrypoint + $(RM) $@ + gzip -k $^ + .PHONY: buildah buildah: bin/buildah -# TODO: remove `grep -v loong64` from `ALL_CROSS_TARGETS` once go.etcd.io/bbolt 1.3.7 is out. -ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep -v loong64))) +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) @@ -113,7 +125,7 @@ gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) codespell: - codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,erro -w + codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L passt,bu,uint,iff,od,erro -w .PHONY: validate validate: install.tools @@ -167,7 +179,7 @@ install.runc: .PHONY: test-conformance test-conformance: - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 20m ./tests/conformance + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 60m ./tests/conformance .PHONY: test-integration test-integration: install.tools @@ -179,13 +191,14 @@ tests/testreport/testreport: tests/testreport/testreport.go .PHONY: test-unit test-unit: tests/testreport/testreport - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... 
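The entrypoint rules above assemble a tiny static binary with as and ld, strip it, and gzip it into internal/mkcw/embed/entrypoint.gz so it can be compiled into buildah itself. A minimal sketch of how such a blob is typically embedded via go:embed; the package, variable name, and path here are illustrative assumptions, not necessarily what internal/mkcw/entrypoint.go contains:

package mkcw

import _ "embed"

// entrypoint.gz is produced by the Makefile rules above (as + ld +
// strip + gzip); embedding it lets the buildah binary write the static
// entrypoint into images it converts, without a runtime dependency.
//
//go:embed embed/entrypoint.gz
var entrypointCompressedBytes []byte
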
| grep -v vendor | grep -v tests | grep -v cmd | grep -v chroot | grep -v copier) -timeout 45m + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" $(RACEFLAGS) ./chroot ./copier -timeout 60m tmp=$(shell mktemp -d) ; \ mkdir -p $$tmp/root $$tmp/runroot; \ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: - podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.18 make vendor + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.20 make vendor .PHONY: vendor vendor: diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 1bb6c850..c61de5a4 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -22,6 +22,7 @@ import ( "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/hashicorp/go-multierror" + digest "github.com/opencontainers/go-digest" "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" @@ -35,6 +36,9 @@ type AddAndCopyOptions struct { // newly-added content, potentially overriding permissions which would // otherwise be set to 0:0. Chown string + // Checksum is a standard container digest string (e.g. :) + // and is the expected hash of the content being copied. + Checksum string // PreserveOwnership, if Chown is not set, tells us to avoid setting // ownership of copied items to 0:0, instead using whatever ownership // information is already set. Not meaningful for remote sources or @@ -77,7 +81,7 @@ func sourceIsRemote(source string) bool { } // getURL writes a tar archive containing the named content -func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode) error { +func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error { url, err := url.Parse(src) if err != nil { return err @@ -110,7 +114,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, } // Figure out the size of the content. size := response.ContentLength - responseBody := response.Body + var responseBody io.Reader = response.Body if size < 0 { // Create a temporary file and copy the content to it, so that // we can figure out how much content there is. @@ -130,6 +134,11 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, } responseBody = f } + var digester digest.Digester + if srcDigest != "" { + digester = srcDigest.Algorithm().Digester() + responseBody = io.TeeReader(responseBody, digester.Hash()) + } // Write the output archive. Set permissions for compatibility. 
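The digester setup just added to getURL() is the core of the new ADD checksum support: when AddAndCopyOptions.Checksum parses to a digest, the HTTP response body is teed through the digest algorithm's hasher, so the content is hashed while it streams into the tar writer and compared once the copy finishes. A self-contained sketch of that pattern; the helper name verifyWhileCopying is hypothetical:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// verifyWhileCopying copies src to dst while hashing every byte that
// passes through, then fails if the computed digest does not match the
// expected one; the same TeeReader pattern getURL() uses above.
func verifyWhileCopying(dst io.Writer, src io.Reader, expected digest.Digest) error {
	digester := expected.Algorithm().Digester()
	if _, err := io.Copy(dst, io.TeeReader(src, digester.Hash())); err != nil {
		return err
	}
	if actual := digester.Digest(); actual != expected {
		return fmt.Errorf("unexpected digest %s, want %s", actual, expected)
	}
	return nil
}

func main() {
	payload := "hello, world"
	expected := digest.FromString(payload) // sha256 digest of the payload
	if err := verifyWhileCopying(os.Stdout, strings.NewReader(payload), expected); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

As in the hunk, the comparison can only happen after the stream has been fully consumed, which is why getURL() checks the digest only after the tar entry has been written.
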
tw := tar.NewWriter(writer) defer tw.Close() @@ -161,6 +170,12 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, return fmt.Errorf("writing content from %q to tar stream: %w", src, err) } + if digester != nil { + if responseDigest := digester.Digest(); responseDigest != srcDigest { + return fmt.Errorf("unexpected response digest for %q: %s, want %s", src, responseDigest, srcDigest) + } + } + return nil } @@ -392,9 +407,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption var wg sync.WaitGroup if sourceIsRemote(src) { pipeReader, pipeWriter := io.Pipe() + var srcDigest digest.Digest + if options.Checksum != "" { + srcDigest, err = digest.Parse(options.Checksum) + if err != nil { + return fmt.Errorf("invalid checksum flag: %w", err) + } + } wg.Add(1) go func() { - getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles) + getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest) pipeWriter.Close() wg.Done() }() @@ -441,6 +463,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption continue } + if options.Checksum != "" { + return fmt.Errorf("checksum flag is not supported for local sources") + } + // Dig out the result of running glob+stat on this source spec. var localSourceStat *copier.StatsForGlob for _, st := range localSourceStats { @@ -456,9 +482,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption // Iterate through every item that matched the glob. itemsCopied := 0 for _, glob := range localSourceStat.Globbed { - rel, err := filepath.Rel(contextDir, glob) - if err != nil { - return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err) + rel := glob + if filepath.IsAbs(glob) { + if rel, err = filepath.Rel(contextDir, glob); err != nil { + return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err) + } } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir) diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 1f8f85cd..e4ed5dcd 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -349,10 +349,10 @@ type BuilderOptions struct { ProcessLabel string // MountLabel is the SELinux mount label associated with the container MountLabel string - // PreserveBaseImageAnn[otation]s indicates that we should preserve base - // image information that was present in our base image, instead of - // overwriting them with information about the base image itself. This - // is mainly useful as an internal implementation detail of multistage + // PreserveBaseImageAnns indicates that we should preserve base + // image information (Annotations) that are present in our base image, + // rather than overwriting them with information about the base image + // itself. Useful as an internal implementation detail of multistage // builds, and does not need to be set by most callers. PreserveBaseImageAnns bool } @@ -386,6 +386,11 @@ type ImportFromImageOptions struct { SystemContext *types.SystemContext } +// ConfidentialWorkloadOptions encapsulates options which control whether or not +// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image +// instead of the usual rootfs contents. 
+type ConfidentialWorkloadOptions = define.ConfidentialWorkloadOptions + // NewBuilder creates a new build container. func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { if options.CommonBuildOpts == nil { @@ -433,6 +438,9 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) { b.store = store b.fixupConfig(nil) b.setupLogger() + if b.CommonBuildOpts == nil { + b.CommonBuildOpts = &CommonBuildOptions{} + } return b, nil } @@ -469,6 +477,9 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { b.store = store b.fixupConfig(nil) b.setupLogger() + if b.CommonBuildOpts == nil { + b.CommonBuildOpts = &CommonBuildOptions{} + } return b, nil } if err != nil { @@ -506,6 +517,9 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { b.store = store b.setupLogger() b.fixupConfig(nil) + if b.CommonBuildOpts == nil { + b.CommonBuildOpts = &CommonBuildOptions{} + } builders = append(builders, b) continue } diff --git a/vendor/github.com/containers/buildah/buildah.spec.rpkg b/vendor/github.com/containers/buildah/buildah.spec.rpkg deleted file mode 100644 index da22154e..00000000 --- a/vendor/github.com/containers/buildah/buildah.spec.rpkg +++ /dev/null @@ -1,165 +0,0 @@ -# For automatic rebuilds in COPR - -# The following tag is to get correct syntax highlighting for this file in vim text editor -# vim: syntax=spec - -# Any additinoal comments should go below this line or else syntax highlighting -# may not work. - -# CAUTION: This is not a replacement for RPMs provided by your distro. -# Only intended to build and test the latest unreleased changes. - -%global with_debug 1 - -# RHEL 8's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we -# set it separately here and do not depend on RHEL 8's go-srpm-macros package. -%if !0%{?fedora} && 0%{?rhel} <= 8 -%define gobuild(o:) GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**}; -%endif - -%if 0%{?with_debug} -%global _find_debuginfo_dwz_opts %{nil} -%global _dwz_low_mem_die_limit 0 -%else -%global debug_package %{nil} -%endif - -%global provider github -%global provider_tld com -%global project containers -%global repo %{name} -# https://github.com/containers/%%{name} -%global import_path %{provider}.%{provider_tld}/%{project}/%{repo} -%global git0 https://%{import_path} - -Name: {{{ git_dir_name }}} -Epoch: 101 -Version: {{{ git_dir_version }}} -Release: 1%{?dist} -Summary: Manage Pods, Containers and Container Images -License: ASL 2.0 -URL: https://github.com/containers/buildah -VCS: {{{ git_dir_vcs }}} -Source: {{{ git_dir_pack }}} -BuildRequires: device-mapper-devel -BuildRequires: git-core -BuildRequires: golang -BuildRequires: glib2-devel -BuildRequires: glibc-static -BuildRequires: go-md2man -%if 0%{?fedora} || 0%{?rhel} >= 9 -BuildRequires: go-rpm-macros -%endif -BuildRequires: gpgme-devel -BuildRequires: libassuan-devel -BuildRequires: make -BuildRequires: ostree-devel -BuildRequires: shadow-utils-subid-devel -%if 0%{?fedora} && ! 
0%{?rhel} -BuildRequires: btrfs-progs-devel -%endif -Requires: containers-common-extra >= 4:1-78 -%if 0%{?rhel} -BuildRequires: libseccomp-devel -%else -BuildRequires: libseccomp-static -%endif -Requires: libseccomp -Suggests: cpp - -%description -The %{name} package provides a command line tool which can be used to -* create a working container from scratch -or -* create a working container from an image as a starting point -* mount/umount a working container's root file system for manipulation -* save container's root file system layer to create a new image -* delete a working container or an image. - -%package tests -Summary: Tests for %{name} -Requires: %{name} = %{version}-%{release} -Requires: bats -Requires: bzip2 -Requires: podman -Requires: golang -Requires: jq -Requires: httpd-tools -Requires: openssl -Requires: nmap-ncat -Requires: git-daemon - -%description tests -%{summary} - -This package contains system tests for %{name} - -%prep -{{{ git_dir_setup_macro }}} - -%build -%set_build_flags -export GOPATH=$(pwd)/_build:$(pwd) -export CGO_CFLAGS=$CFLAGS -# These extra flags present in $CFLAGS have been skipped for now as they break the build -CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g') -CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g') -CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g') - -%ifarch x86_64 -export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full" -%endif - -mkdir _build -pushd _build -mkdir -p src/%{provider}.%{provider_tld}/%{project} -ln -s $(dirs +1 -l) src/%{import_path} -popd - -mv vendor src - -export CNI_VERSION=`grep '^# github.com/containernetworking/cni ' src/modules.txt | sed 's,.* ,,'` -export LDFLAGS="-X main.buildInfo=`date +%s` -X main.cniVersion=${CNI_VERSION}" - -export BUILDTAGS="$(hack/libsubid_tag.sh) seccomp selinux $(hack/systemd_tag.sh)" -%if 0%{?rhel} -export BUILDTAGS="$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion" -%endif - -%gobuild -o bin/%{name} %{import_path}/cmd/%{name} -%gobuild -o bin/imgtype %{import_path}/tests/imgtype -%gobuild -o bin/copy %{import_path}/tests/copy -GOMD2MAN=go-md2man %{__make} -C docs - -# This will copy the files generated by the `make` command above into -# the installable rpm package. -%install -export GOPATH=$(pwd)/_build:$(pwd):%{gopath} -make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions -make DESTDIR=%{buildroot} PREFIX=%{_prefix} -C docs install - -install -d -p %{buildroot}/%{_datadir}/%{name}/test/system -cp -pav tests/. 
%{buildroot}/%{_datadir}/%{name}/test/system -cp bin/imgtype %{buildroot}/%{_bindir}/%{name}-imgtype -cp bin/copy %{buildroot}/%{_bindir}/%{name}-copy - -rm -f %{buildroot}%{_mandir}/man5/{Containerfile.5*,containerignore.5*} - - -%files -%license LICENSE -%doc README.md -%{_bindir}/%{name} -%{_mandir}/man1/%{name}* -%dir %{_datadir}/bash-completion -%dir %{_datadir}/bash-completion/completions -%{_datadir}/bash-completion/completions/%{name} - -%files tests -%license LICENSE -%{_bindir}/%{name}-imgtype -%{_bindir}/%{name}-copy -%{_datadir}/%{name}/test - -%changelog -{{{ git_dir_changelog }}} diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 1dea1c0f..ef2049f3 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,283 @@ +- Changelog for v1.33.7 (2024-03-18) + * CVE-2024-1753 container escape fix + * tests: skip_if_no_unshare(): check for --setuid + +-Changelog for v1.33.6 (2024-02-14) + + * [release-1.33] Bump golang.org/x/crypto v0.17.0 + +-Changelog for v1.33.5 (2024-02-01) + + * Bump c/common to v0.57.4, moby/buildkit v0.5.12 + +-Changelog for v1.33.4 (2024-01-30) + + * Bump c/image to v5.29.2 and c/common to v0.57.3 + +-Changelog for v1.33.3 (2024-01-18) + + * Bump c/common to 0.57.2 and c/image to 5.29.1 + +- Changelog for v1.33.2 (2023-11-22) + * Update minimum to golang 1.20 + * fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.0 + * fix(deps): update module github.com/moby/buildkit to v0.12.3 + * Bump to v1.33.2-dev + +- Changelog for v1.33.1 (2023-11-18) + * fix(deps): update module github.com/moby/buildkit to v0.11.4 [security] + * test,heredoc: use fedora instead of docker.io/library/python:latest + * Bump to v1.33.1-dev + +- Changelog for v1.33.0 (2023-11-17) + * Never omit layers for emptyLayer instructions when squashing/cwing + * Add OverrideChanges and OverrideConfig to CommitOptions + * buildah: add heredoc support for RUN, COPY and ADD + * vendor: bump imagebuilder to v1.2.6-0.20231110114814-35a50d57f722 + * conformance tests: archive the context directory as 0:0 (#5171) + * blobcacheinfo,test: blobs must be resued when pushing across registry + * Bump c/storage v1.51.0, c/image v5.29.0, c/common v0.57.0 + * pkg/util.MirrorToTempFileIfPathIsDescriptor(): don't leak an fd + * StageExecutor.Execute: force a commit for --unsetenv, too + * Increase a copier+chroot test timeout + * Add support for --compat-auth-file in login/logout + * Update existing tests for error message change + * Update c/image and c/common to latest + * fix(deps): update module github.com/containerd/containerd to v1.7.9 + * build: downgrade to go 1.20 + * Add godoc for pkg/parse.GetTempDir + * conformance tests: use go-dockerclient for BuildKit builds + * Make TEE types case-insensitive + * fix(deps): update module golang.org/x/crypto to v0.15.0 + * Tweak some help descriptions + * Stop using DefaultNetworkSysctl and use containers.conf only + * Implement ADD checksum flag #5135 + * vendor of openshift/imagebuilder #5135 + * Pass secrets from the host down to internal podman containers + * Update cirrus and version of golang + * image: replace GetStoreImage with ResolveReference + * vendor: bump c/image to 373c52a9466f + * pkg/parse.Platform(): minor simplification + * createConfigsAndManifests: clear history before cw-specific logic + * Use a constant definition instead of "scratch" + * conformance: use require.NoErrorf() more + * 
fix(deps): update module golang.org/x/term to v0.14.0 + * fix(deps): update module golang.org/x/sync to v0.5.0 + * fix(deps): update module github.com/spf13/cobra to v1.8.0 + * fix(deps): update module golang.org/x/sys to v0.14.0 + * fix(deps): update github.com/containers/common digest to 8354404 + * fix(deps): update module github.com/opencontainers/runc to v1.1.10 + * fix(deps): update github.com/containers/luksy digest to b5a7f79 + * Log the platform for build errors during multi-platform builds + * Use mask definitions from containers/common + * Vendor in latest containers/common + * fix(deps): update module github.com/containerd/containerd to v1.7.8 + * fix(deps): update module go.etcd.io/bbolt to v1.3.8 + * container.conf: support attributed string slices + * fix(deps): update module sigs.k8s.io/yaml to v1.4.0 + * Use cutil.StringInSlice rather then contains + * Add --no-hostname option to buildah containers + * vendor c/common: appendable containers.conf strings, Part 1 + * fix(deps): update module github.com/onsi/gomega to v1.28.1 + * chroot.setupChrootBindMounts: pay more attention to flags + * chore(deps): update dependency containers/automation_images to v20231004 + * Vendor containers/common + * chore(deps): update module golang.org/x/net to v0.17.0 [security] + * run: use internal.GetTempDir with os.MkdirTemp + * fix(deps): update module github.com/containerd/containerd to v1.7.7 + * imagebuildah,multi-stage: do not remove base images + * gitignore: add mkcw binary + * mkcw: remove entrypoint binaries + * fix(deps): update module golang.org/x/crypto to v0.14.0 + * fix(deps): update module golang.org/x/sys to v0.13.0 + * fix(deps): update module golang.org/x/sync to v0.4.0 + * Update some comments related to confidential workload + * Use the parent's image ID in the config that we pass to imagebuilder + * fix(deps): update github.com/containers/common digest to 8892536 + * fix(deps): update github.com/containers/luksy digest to 6df88cb + * bug: Ensure the mount type is always BindMount by default + * Protocol can be specified with --port. Ex. 
--port 514/udp + * fix(deps): update module github.com/onsi/gomega to v1.28.0 + * build,config: add support for --unsetlabel + * tests/bud: add tests + * [CI:BUILD] Packit: tag @containers/packit-build on copr build failures + * stage_executor: allow images without layers + * vendor of containers/common + * Removing selinux_tag.sh as no longer needed after 580356f [NO NEW TESTS NEEDED] + * add/copy: make sure we handle relative path names correctly + * fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc5 + * Bump to v1.33.0-dev + * imagebuildah: consider ignorefile with --build-context + +- Changelog for v1.32.0 (2023-09-14) + * GetTmpDir is not using ImageCopyTmpdir correctly + * Run codespell on code + * Bump vendor containers/(common, storage, image) + * Cirrus: Remove multi-arch buildah image builds + * fix(deps): update module github.com/containerd/containerd to v1.7.6 + * Split GetTempDir from internal/util + * Move most of internal/parse to internal/volumes + * copier: remove libimage dependency via util package + * Add some docs for `build --cw`, `commit --cw`, and `mkcw` + * Add `buildah mkcw`, add `--cw` to `buildah commit` and `buildah build` + * Make sure that pathnames picked up from the environment are absolute + * fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.4 + * fix(deps): update module github.com/docker/docker to v24.0.6+incompatible + * Don't try to look up names when committing images + * fix(deps): update module golang.org/x/crypto to v0.13.0 + * docs: use valid github repo + * fix(deps): update module golang.org/x/sys to v0.12.0 + * vendor containers/common@12405381ff45 + * push: --force-compression should be true with --compression-format + * Update module github.com/containerd/containerd to v1.7.5 + * [skip-ci] Update tim-actions/commit-message-checker-with-regex action to v0.3.2 + * docs: add reference to oci-hooks + * Support passing of ULimits as -1 to mean max + * GHA: Attempt to fix discussion_lock workflow + * Fixing the owner of the storage.conf. 
+ * pkg/chrootuser: Ignore comments when parsing /etc/group on FreeBSD + * Use buildah repo rather then podman repo + * GHA: Closed issue/PR comment-lock test + * fix(deps): update module github.com/containers/storage to v1.49.0 + * chore(deps): update dependency containers/automation_images to v20230816 + * Replace troff code with markdown in buildah-{copy,add}.1.md + * [CI:BUILD] rpm: spdx compatible license field + * executor: build-arg warnings must honor global args + * fix(deps): update module github.com/containers/ocicrypt to v1.1.8 + * chroot: `setSeccomp` add support for `ArchPARISC(64)` and `ArchRISCV64` + * make,cross: restore loong64 + * Clear CommonBuildOpts when loading Builder status + * buildah/push/manifest-push: add support for --force-compression + * vendor: bump c/common to v0.55.1-0.20230811093040-524b4d5c12f9 + * chore(deps): update dependency containers/automation_images to v20230809 + * [CI:BUILD] RPM: fix buildtags + * fix(deps): update module github.com/opencontainers/runc to v1.1.9 + * chore(deps): update dependency ubuntu to v22 + * chore(deps): update dependency containers/automation_images to v20230807 + * [CI:BUILD] Packit: add fedora-eln targets + * [CI:BUILD] RPM: build docs with vendored go-md2man + * packit: Build PRs into default packit COPRs + * Update install.md + * Update install.md changes current Debian stable version name + * fix(deps): update module golang.org/x/term to v0.11.0 + * fix(deps): update module golang.org/x/crypto to v0.12.0 + * tests: fix layer-label tests + * buildah: add --layer-label for setting labels on layers + * Cirrus: container/rootless env. var. passthrough + * Cirrus: Remove duplicate env. var. definitions + * fix(deps): update github.com/containers/storage digest to c3da76f + * Add a missing .Close() call on an ImageSource + * Create only a reference when that's all we need + * Add a missing .Close() call on an ImageDestination + * CI:BUILD] RPM: define gobuild macro for rhel/centos stream + * manifest/push: add support for --add-compression + * manifest/inspect: add support for tls-verify and authfile + * vendor: bump c/common to v0.55.1-0.20230727095721-647ed1d4d79a + * vendor: bump c/image to v5.26.1-0.20230726142307-8c387a14f4ac + * fix(deps): update module github.com/containerd/containerd to v1.7.3 + * fix(deps): update module github.com/onsi/gomega to v1.27.10 + * fix(deps): update module github.com/docker/docker to v24.0.5+incompatible + * fix(deps): update module github.com/containers/image/v5 to v5.26.1 + * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0 + * Update vendor of containers/(storage,image,common) + * fix(deps): update module github.com/opencontainers/runc to v1.1.8 + * [CI:BUILD] Packit: remove pre-sync action + * fix(deps): update module github.com/containers/common to v0.55.2 + * [CI:BUILD] Packit: downstream task script needs GOPATH + * Vendor in containers/(common, image, storage) + * fix(deps): update module golang.org/x/term to v0.10.0 + * [CI:BUILD] Packit: fix pre-sync action for downstream tasks + * contrib/buildahimage: set config correctly for rootless build user + * fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc4 + * Bump to v1.32.0-dev + * Update debian install instructions + * pkg/overlay: add limited support for FreeBSD + +- Changelog for v1.31.0 (2023-06-30) + * Bump c/common to 0.55.1 and c/image to 5.26.1 + * Bump c/image to 5.26.0 and c/common to 0.54.0 + * vendor: update c/{common,image,storage} to latest + * chore: pkg imported 
more than once + * buildah: add pasta(1) support + * use slirp4netns package from c/common + * update c/common to latest + * add hostname to /etc/hosts when running with host network + * vendor: update c/common to latest + * [CI:BUILD] Packit: add jobs for downstream Fedora package builds + * fix(deps): update module golang.org/x/sync to v0.3.0 + * fix(deps): update module golang.org/x/crypto to v0.10.0 + * Add smoke tests for encryption CLI helpers + * fix(deps): update module golang.org/x/term to v0.9.0 + * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.3 + * Remove device mapper support + * Remove use of deprecated tar.TypeRegA + * Update tooling to support newer golangci-lint + * Make cli.EncryptConfig,DecryptConfig, GetFormat public + * Don't decrypt images by default + * fix(deps): update module github.com/onsi/gomega to v1.27.8 + * fix(deps): update github.com/containers/storage digest to 3f3fb2f + * Renovate: Don't touch fragile test stuffs + * [CI:DOCS] Update comment to remove ambiguity + * fix(deps): update github.com/containers/image/v5 digest to abe5133 + * fix(deps): update module github.com/sirupsen/logrus to v1.9.3 + * fix(deps): update module github.com/containerd/containerd to v1.7.2 + * Explicitly ref. quay images for CI + * At startup, log the effective capabilities for debugging + * parse: use GetTempDir from internal utils + * GetTmpDir: honor image_copy_tmp_dir from containers.conf + * docs/Makefile: don't show sed invocations + * CI: Support testing w/ podman-next COPR packages + * intermediate-images inherit-label test: make it debuggable + * fix(deps): update github.com/containers/common digest to 462ccdd + * Add a warning to `--secret` docs + * vendor: bump c/storage to v1.46.2-0.20230526114421-55ee2d19292f + * executor: apply label to only final stage + * remove registry.centos.org + * Go back to setting SysProcAttr.Pdeathsig for child processes + * Fix auth.json path (validated on Fedora 38) wq Signed-off-by: Andreas Mack + * fix(deps): update module github.com/stretchr/testify to v1.8.3 + * CI: fix test broken by renovatebot + * chore(deps): update quay.io/libpod/testimage docker tag to v20221018 + * fix(deps): update module github.com/onsi/gomega to v1.27.7 + * test: use debian instead of docker.io/library/debian:testing-slim + * vendor: bump logrus to 1.9.2 + * [skip-ci] Update tim-actions/get-pr-commits action to v1.3.0 + * Revert "Proof of concept: nightly dependency treadmill" + * fix(deps): update module github.com/sirupsen/logrus to v1.9.1 + * vendor in containers/(common,storage,image) + * fix(deps): update module github.com/docker/distribution to v2.8.2+incompatible + * run: drop Pdeathsig + * chroot: lock thread before setPdeathsig + * tests: add a case for required=false + * fix(deps): update module github.com/openshift/imagebuilder to v1.2.5 + * build: validate volumes on backend + * secret: accept required flag w/o value + * fix(deps): update module github.com/containerd/containerd to v1.7.1 + * fix(deps): update module golang.org/x/crypto to v0.9.0 + * Update the demos README file to fix minor typos + * fix(deps): update module golang.org/x/sync to v0.2.0 + * fix(deps): update module golang.org/x/term to v0.8.0 + * manifest, push: use source as destination if not specified + * run,mount: remove path only if they didnt pre-exist + * Cirrus: Fix meta task failing to find commit + * parse: filter edge-case for podman-remote + * fix(deps): update module github.com/opencontainers/runc to v1.1.7 + * fix(deps): update 
module github.com/docker/docker to v23.0.5+incompatible + * build: --platform must accept only arch + * fix(deps): update module github.com/containers/common to v0.53.0 + * makefile: increase conformance timeout + * Cap suffixDigitsModulo to a 9-digits suffix. + * Rename conflict to suffixDigitsModulo + * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.2 + * fix(deps): update module github.com/opencontainers/runc to v1.1.6 + * chore(deps): update centos docker tag to v8 + * Clarify the need for qemu-user-static package + * chore(deps): update quay.io/centos/centos docker tag to v8 + * Renovate: Ensure test/tools/go.mod is managed + * Revert "buildah image should not enable fuse-overlayfs for rootful mode" + * Bump to v1.31.0-dev + * parse: add support for relabel bind mount option + - Changelog for v1.30.0 (2023-04-06) * fix(deps): update module github.com/opencontainers/runc to v1.1.5 * fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.7 diff --git a/vendor/github.com/containers/buildah/chroot/run_common.go b/vendor/github.com/containers/buildah/chroot/run_common.go index e93af1e6..deda64fc 100644 --- a/vendor/github.com/containers/buildah/chroot/run_common.go +++ b/vendor/github.com/containers/buildah/chroot/run_common.go @@ -501,6 +501,10 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io // Apologize for the namespace configuration that we're about to ignore. logNamespaceDiagnostics(spec) + // We need to lock the thread so that PR_SET_PDEATHSIG won't trigger if the current thread exits. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + // Start the parent subprocess. cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) setPdeathsig(cmd.Cmd) diff --git a/vendor/github.com/containers/buildah/chroot/run_linux.go b/vendor/github.com/containers/buildah/chroot/run_linux.go index cb116add..dae4b717 100644 --- a/vendor/github.com/containers/buildah/chroot/run_linux.go +++ b/vendor/github.com/containers/buildah/chroot/run_linux.go @@ -229,6 +229,29 @@ func createPlatformContainer(options runUsingChrootExecSubprocOptions) error { return errors.New("unsupported createPlatformContainer") } +func mountFlagsForFSFlags(fsFlags uintptr) uintptr { + var mountFlags uintptr + for _, mapping := range []struct { + fsFlag uintptr + mountFlag uintptr + }{ + {unix.ST_MANDLOCK, unix.MS_MANDLOCK}, + {unix.ST_NOATIME, unix.MS_NOATIME}, + {unix.ST_NODEV, unix.MS_NODEV}, + {unix.ST_NODIRATIME, unix.MS_NODIRATIME}, + {unix.ST_NOEXEC, unix.MS_NOEXEC}, + {unix.ST_NOSUID, unix.MS_NOSUID}, + {unix.ST_RDONLY, unix.MS_RDONLY}, + {unix.ST_RELATIME, unix.MS_RELATIME}, + {unix.ST_SYNCHRONOUS, unix.MS_SYNCHRONOUS}, + } { + if fsFlags&mapping.fsFlag == mapping.fsFlag { + mountFlags |= mapping.mountFlag + } + } + return mountFlags +} + func makeReadOnly(mntpoint string, flags uintptr) error { var fs unix.Statfs_t // Make sure it's read-only. @@ -236,7 +259,9 @@ func makeReadOnly(mntpoint string, flags uintptr) error { return fmt.Errorf("checking if directory %q was bound read-only: %w", mntpoint, err) } if fs.Flags&unix.ST_RDONLY == 0 { - if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil { + // All callers currently pass MS_RDONLY in "flags", but in case they stop doing + // that at some point in the future... 
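The run_common.go hunk above locks the goroutine to its OS thread before starting the parent subprocess because PR_SET_PDEATHSIG is tied to the lifetime of the parent thread, not the parent process: if the Go runtime let that thread exit while the child was still running, the child would receive its death signal early. A minimal Linux-only sketch of the pattern, with a hypothetical helper name:

package main

import (
	"os/exec"
	"runtime"
	"syscall"
)

// runWithPdeathsig starts a child that the kernel SIGKILLs if our
// thread dies. Locking the goroutine to its OS thread keeps that
// thread alive for the child's lifetime, so the death signal cannot
// fire just because the Go scheduler retired the thread.
func runWithPdeathsig(name string, arg ...string) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	cmd := exec.Command(name, arg...)
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL}
	return cmd.Run()
}

func main() {
	if err := runWithPdeathsig("/bin/true"); err != nil {
		panic(err)
	}
}
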
+ if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_RDONLY|unix.MS_REMOUNT|unix.MS_BIND, ""); err != nil { return fmt.Errorf("remounting %s in mount namespace read-only: %w", mntpoint, err) } } @@ -268,7 +293,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Now bind mount all of those things to be under the rootfs's location in this // mount namespace. commonFlags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_PRIVATE) - bindFlags := commonFlags | unix.MS_NODEV + bindFlags := commonFlags devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY procFlags := devFlags | unix.MS_NODEV sysFlags := devFlags | unix.MS_NODEV @@ -291,7 +316,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", subDev, err) } if fs.Flags&unix.ST_RDONLY == 0 { - if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil { + if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT|unix.MS_BIND, ""); err != nil { return undoBinds, fmt.Errorf("remounting /dev in mount namespace read-only: %w", err) } } @@ -351,7 +376,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys")) - // Bind mount in everything we've been asked to mount. + // Bind, overlay, or tmpfs mount everything we've been asked to mount. for _, m := range spec.Mounts { // Skip anything that we just mounted. switch m.Destination { @@ -369,12 +394,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( continue } } - // Skip anything that isn't a bind or tmpfs mount. + // Skip anything that isn't a bind or overlay or tmpfs mount. if m.Type != "bind" && m.Type != "tmpfs" && m.Type != "overlay" { logrus.Debugf("skipping mount of type %q on %q", m.Type, m.Destination) continue } - // If the target is there, we can just mount it. + // If the target is already there, we can just mount over it. var srcinfo os.FileInfo switch m.Type { case "bind": @@ -382,24 +407,22 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( if err != nil { return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", m.Source, err) } - case "overlay": - fallthrough - case "tmpfs": + case "overlay", "tmpfs": srcinfo, err = os.Stat("/") if err != nil { - return undoBinds, fmt.Errorf("examining / to use as a template for a %s: %w", m.Type, err) + return undoBinds, fmt.Errorf("examining / to use as a template for a %s mount: %w", m.Type, err) } } target := filepath.Join(spec.Root.Path, m.Destination) - // Check if target is a symlink + // Check if target is a symlink. stat, err := os.Lstat(target) - // If target is a symlink, follow the link and ensure the destination exists + // If target is a symlink, follow the link and ensure the destination exists. if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) { target, err = copier.Eval(spec.Root.Path, m.Destination, copier.EvalOptions{}) if err != nil { return nil, fmt.Errorf("evaluating symlink %q: %w", target, err) } - // Stat the destination of the evaluated symlink + // Stat the destination of the evaluated symlink. 
_, err = os.Stat(target) } if err != nil { @@ -407,7 +430,8 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( if !errors.Is(err, os.ErrNotExist) { return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err) } - // The target isn't there yet, so create it. + // The target isn't there yet, so create it. If the source is a directory, + // we need a directory, otherwise we need a non-directory (i.e., a file). if srcinfo.IsDir() { if err = os.MkdirAll(target, 0755); err != nil { return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err) @@ -423,79 +447,94 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( file.Close() } } + // Sort out which flags we're asking for, and what statfs() should be telling us + // if we successfully mounted with them. requestFlags := uintptr(0) - expectedFlags := uintptr(0) + expectedImportantFlags := uintptr(0) + importantFlags := uintptr(0) + possibleImportantFlags := uintptr(unix.ST_NODEV | unix.ST_NOEXEC | unix.ST_NOSUID | unix.ST_RDONLY) for _, option := range m.Options { switch option { case "nodev": requestFlags |= unix.MS_NODEV - expectedFlags |= unix.ST_NODEV + importantFlags |= unix.ST_NODEV + expectedImportantFlags |= unix.ST_NODEV case "dev": requestFlags &= ^uintptr(unix.MS_NODEV) - expectedFlags &= ^uintptr(unix.ST_NODEV) + importantFlags |= unix.ST_NODEV + expectedImportantFlags &= ^uintptr(unix.ST_NODEV) case "noexec": requestFlags |= unix.MS_NOEXEC - expectedFlags |= unix.ST_NOEXEC + importantFlags |= unix.ST_NOEXEC + expectedImportantFlags |= unix.ST_NOEXEC case "exec": requestFlags &= ^uintptr(unix.MS_NOEXEC) - expectedFlags &= ^uintptr(unix.ST_NOEXEC) + importantFlags |= unix.ST_NOEXEC + expectedImportantFlags &= ^uintptr(unix.ST_NOEXEC) case "nosuid": requestFlags |= unix.MS_NOSUID - expectedFlags |= unix.ST_NOSUID + importantFlags |= unix.ST_NOSUID + expectedImportantFlags |= unix.ST_NOSUID case "suid": requestFlags &= ^uintptr(unix.MS_NOSUID) - expectedFlags &= ^uintptr(unix.ST_NOSUID) + importantFlags |= unix.ST_NOSUID + expectedImportantFlags &= ^uintptr(unix.ST_NOSUID) case "ro": requestFlags |= unix.MS_RDONLY - expectedFlags |= unix.ST_RDONLY + importantFlags |= unix.ST_RDONLY + expectedImportantFlags |= unix.ST_RDONLY case "rw": requestFlags &= ^uintptr(unix.MS_RDONLY) - expectedFlags &= ^uintptr(unix.ST_RDONLY) + importantFlags |= unix.ST_RDONLY + expectedImportantFlags &= ^uintptr(unix.ST_RDONLY) } } switch m.Type { case "bind": - // Do the bind mount. - logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination)) - if err := unix.Mount(m.Source, target, "", bindFlags|requestFlags, ""); err != nil { + // Do the initial bind mount. We'll worry about the flags in a bit. 
+ logrus.Debugf("bind mounting %q on %q %v", m.Destination, filepath.Join(spec.Root.Path, m.Destination), m.Options) + if err = unix.Mount(m.Source, target, "", bindFlags|requestFlags, ""); err != nil { return undoBinds, fmt.Errorf("bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err) } - if (requestFlags & unix.MS_RDONLY) != 0 { - if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err) - } - // we need to make sure these flags are maintained in the REMOUNT operation - additionalFlags := uintptr(fs.Flags) & (unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV) - if err := unix.Mount("", target, "", unix.MS_REMOUNT|unix.MS_BIND|unix.MS_RDONLY|additionalFlags, ""); err != nil { - return undoBinds, fmt.Errorf("setting flags on the bind mount %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err) - } - } logrus.Debugf("bind mounted %q to %q", m.Source, target) case "tmpfs": - // Mount a tmpfs. - if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { - return undoBinds, fmt.Errorf("mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err) + // Mount a tmpfs. We'll worry about the flags in a bit. + if err = mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { + return undoBinds, fmt.Errorf("mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(append(m.Options, "private"), ","), err) } logrus.Debugf("mounted a tmpfs to %q", target) case "overlay": - // Mount a overlay. - if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { - return undoBinds, fmt.Errorf("mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err) + // Mount an overlay. We'll worry about the flags in a bit. + if err = mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { + return undoBinds, fmt.Errorf("mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(append(m.Options, "private"), ","), err) } logrus.Debugf("mounted a overlay to %q", target) } + // Time to worry about the flags. if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err) + return undoBinds, fmt.Errorf("checking if volume %q was mounted with requested flags: %w", target, err) } - if uintptr(fs.Flags)&expectedFlags != expectedFlags { - if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, fmt.Errorf("remounting %q in mount namespace with expected flags: %w", target, err) + effectiveImportantFlags := uintptr(fs.Flags) & importantFlags + if effectiveImportantFlags != expectedImportantFlags { + // Do a remount to try to get the desired flags to stick. + effectiveUnimportantFlags := uintptr(fs.Flags) & ^possibleImportantFlags + if err = unix.Mount(target, target, m.Type, unix.MS_REMOUNT|bindFlags|requestFlags|mountFlagsForFSFlags(effectiveUnimportantFlags), ""); err != nil { + return undoBinds, fmt.Errorf("remounting %q in mount namespace with flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err) + } + // Check if the desired flags stuck. 
+ if err = unix.Statfs(target, &fs); err != nil { + return undoBinds, fmt.Errorf("checking if directory %q was remounted with requested flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err) + } + newEffectiveImportantFlags := uintptr(fs.Flags) & importantFlags + if newEffectiveImportantFlags != expectedImportantFlags { + return undoBinds, fmt.Errorf("unable to remount %q with requested flags %#x instead of %#x, just got %#x back", target, requestFlags, effectiveImportantFlags, newEffectiveImportantFlags) } } } // Set up any read-only paths that we need to. If we're running inside - // of a container, some of these locations will already be read-only. + // of a container, some of these locations will already be read-only, in + // which case can declare victory and move on. for _, roPath := range spec.Linux.ReadonlyPaths { r := filepath.Join(spec.Root.Path, roPath) target, err := filepath.EvalSymlinks(r) @@ -515,12 +554,13 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } return undoBinds, fmt.Errorf("checking if directory %q is already read-only: %w", target, err) } - if fs.Flags&unix.ST_RDONLY != 0 { + if fs.Flags&unix.ST_RDONLY == unix.ST_RDONLY { continue } - // Mount the location over itself, so that we can remount it as read-only. - roFlags := uintptr(unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY) - if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REC, ""); err != nil { + // Mount the location over itself, so that we can remount it as read-only, making + // sure to preserve any combination of nodev/noexec/nosuid that's already in play. + roFlags := mountFlagsForFSFlags(uintptr(fs.Flags)) | unix.MS_RDONLY + if err := unix.Mount(target, target, "", bindFlags|roFlags, ""); err != nil { if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue @@ -532,7 +572,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err) } if fs.Flags&unix.ST_RDONLY == 0 { - if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil { + if err := unix.Mount(target, target, "", unix.MS_REMOUNT|unix.MS_RDONLY|bindFlags|mountFlagsForFSFlags(uintptr(fs.Flags)), ""); err != nil { return undoBinds, fmt.Errorf("remounting %q in mount namespace read-only: %w", target, err) } } @@ -541,6 +581,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( return undoBinds, fmt.Errorf("checking if directory %q was remounted read-only: %w", target, err) } if fs.Flags&unix.ST_RDONLY == 0 { + // Still not read only. return undoBinds, fmt.Errorf("verifying that %q in mount namespace was remounted read-only: %w", target, err) } } @@ -578,7 +619,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( if err = unix.Statfs(target, &statfs); err != nil { return undoBinds, fmt.Errorf("checking if directory %q is a mountpoint: %w", target, err) } - isReadOnly := statfs.Flags&unix.MS_RDONLY != 0 + isReadOnly := statfs.Flags&unix.ST_RDONLY == unix.ST_RDONLY // Check if any of the IDs we're mapping could read it. 
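Two details in the mount-flag handling above are easy to miss: statfs(2) reports ST_* flags while mount(2) takes MS_* flags, and the two constant spaces differ, which is why mountFlagsForFSFlags() exists and why the statfs check against MS_RDONLY was corrected to ST_RDONLY; and making an arbitrary path read-only takes two steps, a bind mount over itself followed by a read-only remount that preserves whatever nodev/noexec/nosuid flags are already in effect. A Linux-only sketch under those assumptions (it needs CAP_SYS_ADMIN to actually run):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// remountReadOnly makes target read-only without dropping any
// nodev/noexec/nosuid protection already in effect on it.
func remountReadOnly(target string) error {
	var fs unix.Statfs_t
	if err := unix.Statfs(target, &fs); err != nil {
		return fmt.Errorf("statfs %q: %w", target, err)
	}
	if fs.Flags&unix.ST_RDONLY != 0 {
		return nil // already read-only, declare victory
	}
	// Translate the ST_* flags statfs() reported into the MS_* flags
	// mount() understands; the two constant spaces are not the same.
	keep := uintptr(0)
	for _, m := range []struct{ st, ms uintptr }{
		{unix.ST_NODEV, unix.MS_NODEV},
		{unix.ST_NOEXEC, unix.MS_NOEXEC},
		{unix.ST_NOSUID, unix.MS_NOSUID},
	} {
		if uintptr(fs.Flags)&m.st != 0 {
			keep |= m.ms
		}
	}
	// Step 1: bind-mount the path over itself so it becomes a mount point.
	if err := unix.Mount(target, target, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("bind mounting %q: %w", target, err)
	}
	// Step 2: remount read-only; MS_RDONLY is ignored when the bind
	// mount is first created, so the flag only sticks on the remount.
	return unix.Mount(target, target, "", unix.MS_REMOUNT|unix.MS_BIND|unix.MS_RDONLY|keep, "")
}

func main() {
	if err := remountReadOnly("/tmp"); err != nil {
		fmt.Println(err)
	}
}
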
var stat unix.Stat_t if err = unix.Stat(target, &stat); err != nil { @@ -641,11 +682,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( return undoBinds, fmt.Errorf("masking directory %q in mount namespace: %w", target, err) } if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, fmt.Errorf("checking if directory %q was mounted read-only in mount namespace: %w", target, err) + return undoBinds, fmt.Errorf("checking if masked directory %q was mounted read-only in mount namespace: %w", target, err) } if fs.Flags&unix.ST_RDONLY == 0 { - if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil { - return undoBinds, fmt.Errorf("making sure directory %q in mount namespace is read only: %w", target, err) + if err = unix.Mount(target, target, "", syscall.MS_REMOUNT|roFlags|mountFlagsForFSFlags(uintptr(fs.Flags)), ""); err != nil { + return undoBinds, fmt.Errorf("making sure masked directory %q in mount namespace is read only: %w", target, err) } } } diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go index cd203411..e875f330 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -13,6 +13,8 @@ import ( "github.com/sirupsen/logrus" ) +const seccompAvailable = true + // setSeccomp sets the seccomp filter for ourselves and any processes that we'll start. func setSeccomp(spec *specs.Spec) error { logrus.Debugf("setting seccomp configuration") @@ -79,9 +81,11 @@ func setSeccomp(spec *specs.Spec) error { case specs.ArchS390X: return libseccomp.ArchS390X case specs.ArchPARISC: - /* fallthrough */ /* for now */ + return libseccomp.ArchPARISC case specs.ArchPARISC64: - /* fallthrough */ /* for now */ + return libseccomp.ArchPARISC64 + case specs.ArchRISCV64: + return libseccomp.ArchRISCV64 default: logrus.Errorf("unmappable arch %v", specArch) } diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go b/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go index 02897da7..90e9f14c 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_freebsd.go @@ -7,6 +7,8 @@ import ( "github.com/opencontainers/runtime-spec/specs-go" ) +const seccompAvailable = false + func setSeccomp(spec *specs.Spec) error { // Ignore this on FreeBSD return nil diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go index 5be2e48d..dc80dcd1 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -9,6 +9,8 @@ import ( "github.com/opencontainers/runtime-spec/specs-go" ) +const seccompAvailable = false + func setSeccomp(spec *specs.Spec) error { if spec.Linux.Seccomp != nil { return errors.New("configured a seccomp filter without seccomp support?") diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index 21ec6ea5..ef55e541 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -3,7 +3,6 @@ package buildah import ( "context" "encoding/json" - "errors" "fmt" "io" "os" @@ -22,7 +21,6 @@ import ( "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" encconfig 
"github.com/containers/ocicrypt/config" - "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/stringid" digest "github.com/opencontainers/go-digest" @@ -105,9 +103,21 @@ type CommitOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + // ConfidentialWorkloadOptions is used to force the output image's rootfs to contain a + // LUKS-compatibly encrypted disk image (for use with krun) instead of the usual + // contents of a rootfs. + ConfidentialWorkloadOptions ConfidentialWorkloadOptions // UnsetEnvs is a list of environments to not add to final image. // Deprecated: use UnsetEnv() before committing instead. UnsetEnvs []string + // OverrideConfig is an optional Schema2Config which can override parts + // of the working container's configuration for the image that is being + // committed. + OverrideConfig *manifest.Schema2Config + // OverrideChanges is a slice of Dockerfile-style instructions to make + // to the configuration of the image that is being committed, after + // OverrideConfig is applied. + OverrideChanges []string } var ( @@ -354,7 +364,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options if len(options.AdditionalTags) > 0 { switch dest.Transport().Name() { case is.Transport.Name(): - img, err := is.Transport.GetStoreImage(b.store, dest) + _, img, err := is.ResolveReference(dest) if err != nil { return imgID, nil, "", fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err) } @@ -367,11 +377,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } } - img, err := is.Transport.GetStoreImage(b.store, dest) - if err != nil && !errors.Is(err, storage.ErrImageUnknown) { - return imgID, nil, "", fmt.Errorf("locating image %q in local storage: %w", transports.ImageName(dest), err) - } - if err == nil { + if dest.Transport().Name() == is.Transport.Name() { + dest2, img, err := is.ResolveReference(dest) + if err != nil { + return imgID, nil, "", fmt.Errorf("locating image %q in local storage: %w", transports.ImageName(dest), err) + } + dest = dest2 imgID = img.ID toPruneNames := make([]string, 0, len(img.Names)) for _, name := range img.Names { @@ -384,11 +395,6 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options return imgID, nil, "", fmt.Errorf("failed to remove temporary name from image %q: %w", imgID, err) } logrus.Debugf("removing %v from assigned names to image %q", nameToRemove, img.ID) - dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID) - if err != nil { - return imgID, nil, "", fmt.Errorf("creating unnamed destination reference for image: %w", err) - } - dest = dest2 } if options.IIDFile != "" { if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil { diff --git a/vendor/github.com/containers/buildah/convertcw.go b/vendor/github.com/containers/buildah/convertcw.go new file mode 100644 index 00000000..85576f42 --- /dev/null +++ b/vendor/github.com/containers/buildah/convertcw.go @@ -0,0 +1,217 @@ +package buildah + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal/mkcw" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + 
"github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// CWConvertImageOptions provides both required and optional bits of +// configuration for CWConvertImage(). +type CWConvertImageOptions struct { + // Required parameters. + InputImage string + + // If supplied, we'll tag the resulting image with the specified name. + Tag string + OutputImage types.ImageReference + + // If supplied, we'll register the workload with this server. + // Practically necessary if DiskEncryptionPassphrase is not set, in + // which case we'll generate one and throw it away after. + AttestationURL string + + // Used to measure the environment. If left unset (0), defaults will be applied. + CPUs int + Memory int + + // Can be manually set. If left unset ("", false, nil), reasonable values will be used. + TeeType define.TeeType + IgnoreAttestationErrors bool + WorkloadID string + DiskEncryptionPassphrase string + Slop string + FirmwareLibrary string + BaseImage string + Logger *logrus.Logger + + // Passed through to BuilderOptions. Most settings won't make + // sense to be made available here because we don't launch a process. + ContainerSuffix string + PullPolicy PullPolicy + BlobDirectory string + SignaturePolicyPath string + ReportWriter io.Writer + IDMappingOptions *IDMappingOptions + Format string + MaxPullRetries int + PullRetryDelay time.Duration + OciDecryptConfig *encconfig.DecryptConfig + MountLabel string +} + +// CWConvertImage takes the rootfs and configuration from one image, generates a +// LUKS-encrypted disk image that more or less includes them both, and puts the +// result into a new container image. +// Returns the new image's ID and digest on success, along with a canonical +// reference for it if a repository name was specified. +func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options CWConvertImageOptions) (string, reference.Canonical, digest.Digest, error) { + // Apply our defaults if some options aren't set. + logger := options.Logger + if logger == nil { + logger = logrus.StandardLogger() + } + + // Now create the target working container, pulling the base image if + // there is one and it isn't present. + builderOptions := BuilderOptions{ + FromImage: options.BaseImage, + SystemContext: systemContext, + Logger: logger, + + ContainerSuffix: options.ContainerSuffix, + PullPolicy: options.PullPolicy, + BlobDirectory: options.BlobDirectory, + SignaturePolicyPath: options.SignaturePolicyPath, + ReportWriter: options.ReportWriter, + IDMappingOptions: options.IDMappingOptions, + Format: options.Format, + MaxPullRetries: options.MaxPullRetries, + PullRetryDelay: options.PullRetryDelay, + OciDecryptConfig: options.OciDecryptConfig, + MountLabel: options.MountLabel, + } + target, err := NewBuilder(ctx, store, builderOptions) + if err != nil { + return "", nil, "", fmt.Errorf("creating container from target image: %w", err) + } + defer func() { + if err := target.Delete(); err != nil { + logrus.Warnf("deleting target container: %v", err) + } + }() + targetDir, err := target.Mount("") + if err != nil { + return "", nil, "", fmt.Errorf("mounting target container: %w", err) + } + defer func() { + if err := target.Unmount(); err != nil { + logrus.Warnf("unmounting target container: %v", err) + } + }() + + // Mount the source image, pulling it first if necessary. 
+ builderOptions = BuilderOptions{ + FromImage: options.InputImage, + SystemContext: systemContext, + Logger: logger, + + ContainerSuffix: options.ContainerSuffix, + PullPolicy: options.PullPolicy, + BlobDirectory: options.BlobDirectory, + SignaturePolicyPath: options.SignaturePolicyPath, + ReportWriter: options.ReportWriter, + IDMappingOptions: options.IDMappingOptions, + Format: options.Format, + MaxPullRetries: options.MaxPullRetries, + PullRetryDelay: options.PullRetryDelay, + OciDecryptConfig: options.OciDecryptConfig, + MountLabel: options.MountLabel, + } + source, err := NewBuilder(ctx, store, builderOptions) + if err != nil { + return "", nil, "", fmt.Errorf("creating container from source image: %w", err) + } + defer func() { + if err := source.Delete(); err != nil { + logrus.Warnf("deleting source container: %v", err) + } + }() + sourceInfo := GetBuildInfo(source) + if err != nil { + return "", nil, "", fmt.Errorf("retrieving info about source image: %w", err) + } + sourceImageID := sourceInfo.FromImageID + sourceSize, err := store.ImageSize(sourceImageID) + if err != nil { + return "", nil, "", fmt.Errorf("computing size of source image: %w", err) + } + sourceDir, err := source.Mount("") + if err != nil { + return "", nil, "", fmt.Errorf("mounting source container: %w", err) + } + defer func() { + if err := source.Unmount(); err != nil { + logrus.Warnf("unmounting source container: %v", err) + } + }() + + // Generate the image contents. + archiveOptions := mkcw.ArchiveOptions{ + AttestationURL: options.AttestationURL, + CPUs: options.CPUs, + Memory: options.Memory, + TempDir: targetDir, + TeeType: options.TeeType, + IgnoreAttestationErrors: options.IgnoreAttestationErrors, + ImageSize: sourceSize, + WorkloadID: options.WorkloadID, + DiskEncryptionPassphrase: options.DiskEncryptionPassphrase, + Slop: options.Slop, + FirmwareLibrary: options.FirmwareLibrary, + Logger: logger, + } + rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions) + if err != nil { + return "", nil, "", fmt.Errorf("generating encrypted image content: %w", err) + } + if err = archive.Untar(rc, targetDir, &archive.TarOptions{}); err != nil { + if err = rc.Close(); err != nil { + logger.Warnf("cleaning up: %v", err) + } + return "", nil, "", fmt.Errorf("saving encrypted image content: %w", err) + } + if err = rc.Close(); err != nil { + return "", nil, "", fmt.Errorf("cleaning up: %w", err) + } + + // Commit the image. Clear out most of the configuration (if there is any — we default + // to scratch as a base) so that an engine that doesn't or can't set up a TEE will just + // run the static entrypoint. The rest of the configuration which the runtime consults + // is in the .krun_config.json file in the encrypted filesystem. 
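Before the function's final clear-and-commit step just below, it may help to see how CWConvertImage is meant to be driven end to end. A hedged sketch: the registry names, attestation URL, and resource figures are placeholders, not values from this patch.

```go
package sketch

import (
	"context"

	"github.com/containers/buildah"
	"github.com/containers/buildah/define"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
	"github.com/sirupsen/logrus"
)

// convert wraps an existing image's rootfs in a LUKS-encrypted disk image
// and tags the result. All option values below are illustrative.
func convert(ctx context.Context, sys *types.SystemContext, store storage.Store) error {
	options := buildah.CWConvertImageOptions{
		InputImage:     "quay.io/example/app:latest",      // assumed source image
		Tag:            "quay.io/example/app:tee",         // assumed output name
		TeeType:        define.SNP,                        // or define.SEV
		AttestationURL: "https://attestation.example.com", // hypothetical server
		CPUs:           2,
		Memory:         512, // illustrative; interpreted by mkcw
		Logger:         logrus.StandardLogger(),
	}
	imageID, canonicalRef, imageDigest, err := buildah.CWConvertImage(ctx, sys, store, options)
	if err != nil {
		return err
	}
	logrus.Infof("converted %s -> %s (%s, %s)", options.InputImage, imageID, canonicalRef, imageDigest)
	return nil
}
```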
+ logger.Log(logrus.DebugLevel, "committing disk image") + target.ClearAnnotations() + target.ClearEnv() + target.ClearLabels() + target.ClearOnBuild() + target.ClearPorts() + target.ClearVolumes() + target.SetCmd(nil) + target.SetCreatedBy(fmt.Sprintf(": convert %q for use with %q", sourceImageID, workloadConfig.Type)) + target.SetDomainname("") + target.SetEntrypoint([]string{"/entrypoint"}) + target.SetHealthcheck(nil) + target.SetHostname("") + target.SetMaintainer("") + target.SetShell(nil) + target.SetUser("") + target.SetWorkDir("") + commitOptions := CommitOptions{ + SystemContext: systemContext, + } + if options.Tag != "" { + commitOptions.AdditionalTags = append(commitOptions.AdditionalTags, options.Tag) + } + return target.Commit(ctx, options.OutputImage, commitOptions) +} diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 7bfbf9d1..babab388 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -19,7 +19,6 @@ import ( "syscall" "time" - "github.com/containers/buildah/util" "github.com/containers/image/v5/pkg/compression" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/fileutils" @@ -1141,7 +1140,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa cb := func() error { tw := tar.NewWriter(bulkWriter) defer tw.Close() - hardlinkChecker := new(util.HardlinkChecker) + hardlinkChecker := new(hardlinkChecker) itemsCopied := 0 for i, item := range queue { // if we're not discarding the names of individual directories, keep track of this one @@ -1353,7 +1352,7 @@ func handleRename(rename map[string]string, name string) string { return name } -func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error { +func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *hardlinkChecker, idMappings *idtools.IDMappings) error { // build the header using the name provided hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget) if err != nil { @@ -1721,7 +1720,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // no type flag for sockets default: return fmt.Errorf("unrecognized Typeflag %c", hdr.Typeflag) - case tar.TypeReg, tar.TypeRegA: + case tar.TypeReg: var written int64 written, err = createFile(path, tr) // only check the length if there wasn't an error, which we'll diff --git a/vendor/github.com/containers/buildah/util/util_not_uint64.go b/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go similarity index 68% rename from vendor/github.com/containers/buildah/util/util_not_uint64.go rename to vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go index a318fb30..062f489f 100644 --- a/vendor/github.com/containers/buildah/util/util_not_uint64.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go @@ -1,6 +1,7 @@ +//go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le) // +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le -package util +package copier import ( "syscall" diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/copier/hardlink_uint64.go similarity index 95% 
rename from vendor/github.com/containers/buildah/util/util_uint64.go rename to vendor/github.com/containers/buildah/copier/hardlink_uint64.go index e404690e..e739495c 100644 --- a/vendor/github.com/containers/buildah/util/util_uint64.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_uint64.go @@ -1,7 +1,7 @@ //go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd // +build linux,!mips,!mipsle,!mips64,!mips64le freebsd -package util +package copier import ( "syscall" diff --git a/vendor/github.com/containers/buildah/copier/hardlink_unix.go b/vendor/github.com/containers/buildah/copier/hardlink_unix.go new file mode 100644 index 00000000..fdc84db1 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/hardlink_unix.go @@ -0,0 +1,32 @@ +//go:build linux || darwin || freebsd +// +build linux darwin freebsd + +package copier + +import ( + "os" + "sync" + "syscall" +) + +type hardlinkDeviceAndInode struct { + device, inode uint64 +} + +type hardlinkChecker struct { + hardlinks sync.Map +} + +func (h *hardlinkChecker) Check(fi os.FileInfo) string { + if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { + if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" { + return name.(string) + } + } + return "" +} +func (h *hardlinkChecker) Add(fi os.FileInfo, name string) { + if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { + h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name) + } +} diff --git a/vendor/github.com/containers/buildah/copier/hardlink_windows.go b/vendor/github.com/containers/buildah/copier/hardlink_windows.go new file mode 100644 index 00000000..ec71ccea --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/hardlink_windows.go @@ -0,0 +1,17 @@ +//go:build !linux && !darwin +// +build !linux,!darwin + +package copier + +import ( + "os" +) + +type hardlinkChecker struct { +} + +func (h *hardlinkChecker) Check(fi os.FileInfo) string { + return "" +} +func (h *hardlinkChecker) Add(fi os.FileInfo, name string) { +} diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index 42c8fd72..95c9b910 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -64,6 +64,9 @@ type CommonBuildOptions struct { LabelOpts []string // MemorySwap limits the amount of memory and swap together. MemorySwap int64 + // NoHostname tells the builder not to create /etc/hostname content when running + // containers. + NoHostname bool // NoHosts tells the builder not to create /etc/hosts content when running // containers. NoHosts bool @@ -163,6 +166,10 @@ type BuildOptions struct { // It allows end user to export recently built rootfs into a directory or tar. // See the documentation of 'buildah build --output' for the details of the format. BuildOutput string + // ConfidentialWorkload controls whether or not, and if so, how, we produce an + // image that's meant to be run using krun as a VM instead of a conventional + // process-type container. + ConfidentialWorkload ConfidentialWorkloadOptions // Additional tags to add to the image that we write, if we know of a // way to add them. 
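The hardlinkChecker moved into the copier package a few hunks above is consumed while streaming files into a tar archive. A sketch of the intended usage, assuming it lives in the same copier package (the real consumer is copierHandlerGet): the first path seen for a multiply-linked inode is archived with its content, and every later path becomes a TypeLink entry pointing back at it.

```go
package copier

import (
	"archive/tar"
	"io"
	"os"
)

// writeTarEntry is a sketch, not part of buildah. It shows how Check/Add
// pair up around tar emission to deduplicate hard-linked regular files.
func writeTarEntry(tw *tar.Writer, checker *hardlinkChecker, path string, fi os.FileInfo) error {
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return err
	}
	hdr.Name = path
	if target := checker.Check(fi); target != "" {
		// This device/inode pair was already archived: record a hard
		// link to the first path instead of writing the data again.
		hdr.Typeflag = tar.TypeLink
		hdr.Linkname = target
		hdr.Size = 0
		return tw.WriteHeader(hdr)
	}
	checker.Add(fi, path)
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(tw, f)
	return err
}
```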
AdditionalTags []string @@ -244,6 +251,8 @@ type BuildOptions struct { Squash bool // Labels metadata for an image Labels []string + // LayerLabels metadata for an intermediate image + LayerLabels []string // Annotation metadata for an image Annotations []string // OnBuild commands to be run by images based on this image @@ -312,6 +321,8 @@ type BuildOptions struct { AllPlatforms bool // UnsetEnvs is a list of environments to not add to final image. UnsetEnvs []string + // UnsetLabels is a list of labels to not add to final image from base image. + UnsetLabels []string // Envs is a list of environment variables to set in the final image. Envs []string // OSFeatures specifies operating system features the image requires. diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index b4c5b07d..50adce01 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,7 +29,7 @@ const ( // identify working containers. Package = "buildah" // Version for the Package. Also used by .packit.sh for Packit builds. - Version = "1.30.0" + Version = "1.33.7" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" @@ -47,11 +47,19 @@ const ( OCI = "oci" // DOCKER used to define the "docker" image format DOCKER = "docker" + + // SEV is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization using encrypted state, requires epyc 1000 "naples") + SEV TeeType = "sev" + // SNP is a known trusted execution environment type: AMD-SNP (SEV secure nested pages) (requires epyc 3000 "milan") + SNP TeeType = "snp" ) +// TeeType is a supported trusted execution environment type. +type TeeType string + var ( - // DefaultCapabilities is the list of capabilities which we grant by - // default to containers which are running under UID 0. + // Deprecated: DefaultCapabilities values should be retrieved from + // github.com/containers/common/pkg/config DefaultCapabilities = []string{ "CAP_AUDIT_WRITE", "CAP_CHOWN", @@ -67,8 +75,8 @@ var ( "CAP_SETUID", "CAP_SYS_CHROOT", } - // DefaultNetworkSysctl is the list of Kernel parameters which we - // grant by default to containers which are running under UID 0. + // Deprecated: DefaultNetworkSysctl values should be retrieved from + // github.com/containers/common/pkg/config DefaultNetworkSysctl = map[string]string{ "net.ipv4.ping_group_range": "0 0", } @@ -105,6 +113,23 @@ type BuildOutputOption struct { IsStdout bool } +// ConfidentialWorkloadOptions encapsulates options which control whether or not +// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image +// instead of the usual rootfs contents. +type ConfidentialWorkloadOptions struct { + Convert bool + AttestationURL string + CPUs int + Memory int + TempDir string + TeeType TeeType + IgnoreAttestationErrors bool + WorkloadID string + DiskEncryptionPassphrase string + Slop string + FirmwareLibrary string +} + // TempDirForURL checks if the passed-in string looks like a URL or -. 
If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 01c1784d..7318e04b 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -16,6 +16,9 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + "github.com/containers/buildah/internal/config" + "github.com/containers/buildah/internal/mkcw" + "github.com/containers/buildah/internal/tmpdir" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" @@ -69,6 +72,7 @@ type containerImageRef struct { annotations map[string]string preferredManifestType string squash bool + confidentialWorkload ConfidentialWorkloadOptions omitHistory bool emptyLayer bool idMappingOptions *define.IDMappingOptions @@ -76,6 +80,8 @@ type containerImageRef struct { blobDirectory string preEmptyLayers []v1.History postEmptyLayers []v1.History + overrideChanges []string + overrideConfig *manifest.Schema2Config } type blobLayerInfo struct { @@ -158,6 +164,52 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om return omediaType, dmediaType, nil } +// Extract the container's whole filesystem as a filesystem image, wrapped +// in LUKS-compatible encryption. +func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) { + var image v1.Image + if err := json.Unmarshal(i.oconfig, &image); err != nil { + return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err) + } + mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) + if err != nil { + return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err) + } + archiveOptions := mkcw.ArchiveOptions{ + AttestationURL: options.AttestationURL, + CPUs: options.CPUs, + Memory: options.Memory, + TempDir: options.TempDir, + TeeType: options.TeeType, + IgnoreAttestationErrors: options.IgnoreAttestationErrors, + WorkloadID: options.WorkloadID, + DiskEncryptionPassphrase: options.DiskEncryptionPassphrase, + Slop: options.Slop, + FirmwareLibrary: options.FirmwareLibrary, + } + rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions) + if err != nil { + if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil { + logrus.Debugf("unmounting container %q: %v", i.containerID, err2) + } + return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err) + } + return ioutils.NewReadCloserWrapper(rc, func() error { + if err = rc.Close(); err != nil { + err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err) + } + if _, err2 := i.store.Unmount(i.containerID, false); err == nil { + if err2 != nil { + err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2) + } + err = err2 + } else { + logrus.Debugf("unmounting container %q: %v", i.containerID, err2) + } + return err + }), nil +} + // Extract the container's whole filesystem as if it were a single layer. 
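One detail of extractConfidentialWorkloadFS above deserves a note: the archive's Close must also unmount the container, so the stream is wrapped with ioutils.NewReadCloserWrapper and the two errors are folded together, preferring the first one hit. The same pattern in miniature, with a temp file standing in for the mount:

```go
package sketch

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/ioutils"
)

// openDeleting returns a ReadCloser whose Close also removes the file,
// mirroring the close-then-unmount wrapper used above.
func openDeleting(path string) (io.ReadCloser, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	return ioutils.NewReadCloserWrapper(f, func() error {
		err := f.Close()
		// Run the extra teardown either way; keep the first error.
		if err2 := os.Remove(path); err == nil {
			err = err2
		}
		return err
	}), nil
}
```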
// Takes ExtractRootfsOptions as argument which allows caller to configure // preserve nature of setuid,setgid,sticky and extended attributes @@ -221,7 +273,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, oimage.RootFS.DiffIDs = []digest.Digest{} // Only clear the history if we're squashing, otherwise leave it be so that we can append // entries to it. - if i.squash || i.omitHistory { + if i.confidentialWorkload.Convert || i.squash || i.omitHistory { oimage.History = []v1.History{} } @@ -244,11 +296,36 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, // Only clear the history if we're squashing, otherwise leave it be so // that we can append entries to it. Clear the parent, too, we no // longer include its layers and history. - if i.squash || i.omitHistory { + if i.confidentialWorkload.Convert || i.squash || i.omitHistory { dimage.Parent = "" dimage.History = []docker.V2S2History{} } + // If we were supplied with a configuration, copy fields from it to + // matching fields in both formats. + if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err) + } + + // If we're producing a confidential workload, override the command and + // assorted other settings that aren't expected to work correctly. + if i.confidentialWorkload.Convert { + dimage.Config.Entrypoint = []string{"/entrypoint"} + oimage.Config.Entrypoint = []string{"/entrypoint"} + dimage.Config.Cmd = nil + oimage.Config.Cmd = nil + dimage.Config.User = "" + oimage.Config.User = "" + dimage.Config.WorkingDir = "" + oimage.Config.WorkingDir = "" + dimage.Config.Healthcheck = nil + dimage.Config.Shell = nil + dimage.Config.Volumes = nil + oimage.Config.Volumes = nil + dimage.Config.ExposedPorts = nil + oimage.Config.ExposedPorts = nil + } + // Build empty manifests. The Layers lists will be populated later. omanifest := v1.Manifest{ Versioned: specs.Versioned{ @@ -296,7 +373,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System for layer != nil { layers = append(append([]string{}, layerID), layers...) layerID = layer.Parent - if layerID == "" || i.squash { + if layerID == "" || i.confidentialWorkload.Convert || i.squash { err = nil break } @@ -308,7 +385,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System logrus.Debugf("layer list: %q", layers) // Make a temporary directory to hold blobs. - path, err := os.MkdirTemp(os.TempDir(), define.Package) + path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package) if err != nil { return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err) } @@ -333,7 +410,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System blobLayers := make(map[digest.Digest]blobLayerInfo) for _, layerID := range layers { what := fmt.Sprintf("layer %q", layerID) - if i.squash { + if i.confidentialWorkload.Convert || i.squash { what = fmt.Sprintf("container %q", i.containerID) } // The default layer media type assumes no compression. @@ -344,14 +421,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if err != nil { return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) } - // If we're up to the final layer, but we don't want to include - // a diff for it, we're done. 
- if i.emptyLayer && layerID == i.layerID { - continue - } // If we already know the digest of the contents of parent // layers, reuse their blobsums, diff IDs, and sizes. - if !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { + if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { layerBlobSum := layer.UncompressedDigest layerBlobSize := layer.UncompressedSize diffID := layer.UncompressedDigest @@ -389,13 +461,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } var rc io.ReadCloser var errChan chan error - if i.squash { + if i.confidentialWorkload.Convert { + // Convert the root filesystem into an encrypted disk image. + rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload) + if err != nil { + return nil, err + } + } else if i.squash { // Extract the root filesystem as a single layer. rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{}) if err != nil { return nil, err } } else { + // If we're up to the final layer, but we don't want to + // include a diff for it, we're done. + if i.emptyLayer && layerID == i.layerID { + continue + } // Extract this layer, one of possibly many. rc, err = i.store.Diff("", layerID, diffOptions) if err != nil { @@ -842,13 +925,16 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR annotations: b.Annotations(), preferredManifestType: manifestType, squash: options.Squash, + confidentialWorkload: options.ConfidentialWorkloadOptions, omitHistory: options.OmitHistory, - emptyLayer: options.EmptyLayer && !options.Squash, + emptyLayer: options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert, idMappingOptions: &b.IDMappingOptions, parent: parent, blobDirectory: options.BlobDirectory, preEmptyLayers: b.PrependedEmptyLayers, postEmptyLayers: b.AppendedEmptyLayers, + overrideChanges: options.OverrideChanges, + overrideConfig: options.OverrideConfig, } return ref, nil } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 5cba467e..03081fde 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -15,8 +15,10 @@ import ( "sync" "github.com/containerd/containerd/platforms" + "github.com/containers/buildah" "github.com/containers/buildah/define" internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/util" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" @@ -65,7 +67,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B if options.CommonBuildOpts == nil { options.CommonBuildOpts = &define.CommonBuildOptions{} } - + if err := parse.Volumes(options.CommonBuildOpts.Volumes); err != nil { + return "", nil, fmt.Errorf("validating volumes: %w", err) + } if len(paths) == 0 { return "", nil, errors.New("building: no dockerfiles specified") } @@ -264,6 +268,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } thisID, thisRef, err := buildDockerfilesOnce(ctx, store, loggerPerPlatform, logPrefix, platformOptions, paths, files) if err != nil { + if errorContext := strings.TrimSpace(logPrefix); errorContext != "" { + return fmt.Errorf("%s: %w", errorContext, err) + } return err } instancesLock.Lock() @@ -666,7 +673,7 @@ func baseImages(dockerfilenames 
[]string, dockerfilecontents [][]byte, from stri } } base := child.Next.Value - if base != "scratch" && !nicknames[base] { + if base != "" && base != buildah.BaseImageFakeName && !nicknames[base] { headingArgs := argsMapToSlice(stage.Builder.HeadingArgs) userArgs := argsMapToSlice(stage.Builder.Args) // append heading args so if --build-arg key=value is not diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 69ad23bb..917c84f6 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -22,7 +22,6 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" - is "github.com/containers/image/v5/storage" storageTransport "github.com/containers/image/v5/storage" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" @@ -101,8 +100,10 @@ type Executor struct { iidfile string squash bool labels []string + layerLabels []string annotations []string layers bool + noHostname bool noHosts bool useCache bool removeIntermediateCtrs bool @@ -116,6 +117,7 @@ type Executor struct { groupAdd []string ignoreFile string args map[string]string + globalArgs map[string]string unusedArgs map[string]struct{} capabilities []string devices define.ContainerDevices @@ -141,12 +143,14 @@ type Executor struct { sshsources map[string]*sshagent.Source logPrefix string unsetEnvs []string + unsetLabels []string processLabel string // Shares processLabel of first stage container with containers of other stages in same build mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build buildOutput string // Specifies instructions for any custom build output osVersion string osFeatures []string envs []string + confidentialWorkload define.ConfidentialWorkloadOptions } type imageTypeAndHistoryAndDiffIDs struct { @@ -176,7 +180,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } devices := define.ContainerDevices{} - for _, device := range append(defaultContainerConfig.Containers.Devices, options.Devices...) { + for _, device := range append(defaultContainerConfig.Containers.Devices.Get(), options.Devices...) { dev, err := parse.DeviceFromPath(device) if err != nil { return nil, err @@ -185,7 +189,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } transientMounts := []Mount{} - for _, volume := range append(defaultContainerConfig.Containers.Volumes, options.TransientMounts...) { + for _, volume := range append(defaultContainerConfig.Volumes(), options.TransientMounts...) 
{ mount, err := parse.Volume(volume) if err != nil { return nil, err @@ -264,8 +268,10 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o iidfile: options.IIDFile, squash: options.Squash, labels: append([]string{}, options.Labels...), + layerLabels: append([]string{}, options.LayerLabels...), annotations: append([]string{}, options.Annotations...), layers: options.Layers, + noHostname: options.CommonBuildOpts.NoHostname, noHosts: options.CommonBuildOpts.NoHosts, useCache: !options.NoCache, removeIntermediateCtrs: options.RemoveIntermediateCtrs, @@ -297,10 +303,12 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o sshsources: sshsources, logPrefix: logPrefix, unsetEnvs: append([]string{}, options.UnsetEnvs...), + unsetLabels: append([]string{}, options.UnsetLabels...), buildOutput: options.BuildOutput, osVersion: options.OSVersion, osFeatures: append([]string{}, options.OSFeatures...), envs: append([]string{}, options.Envs...), + confidentialWorkload: options.ConfidentialWorkload, } if exec.err == nil { exec.err = os.Stderr @@ -314,6 +322,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o exec.unusedArgs[arg] = struct{}{} } } + // Use this flag to collect all args declared before + // first stage and treat them as global args which is + // accessible to all stages. + foundFirstStage := false + globalArgs := make(map[string]string) for _, line := range mainNode.Children { node := line for node != nil { // tokens on this line, though we only care about the first @@ -325,12 +338,20 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // and value, or just an argument, since they can be // separated by either "=" or whitespace. 
list := strings.SplitN(arg.Value, "=", 2) + if !foundFirstStage { + if len(list) > 1 { + globalArgs[list[0]] = list[1] + } + } delete(exec.unusedArgs, list[0]) } + case "FROM": + foundFirstStage = true } break } } + exec.globalArgs = globalArgs return &exec, nil } @@ -361,15 +382,11 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e if imageRef, err := alltransports.ParseImageName(output); err == nil { return imageRef, nil } - runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext}) - if err != nil { - return nil, err - } - resolved, err := runtime.ResolveName(output) + resolved, err := libimage.NormalizeName(output) if err != nil { return nil, err } - imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved) + imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved.String()) if err == nil { return imageRef, nil } @@ -424,7 +441,7 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID if ok { return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err } - imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID) + imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, "@"+imageID) if err != nil { return "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err) } @@ -455,14 +472,14 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil } -func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) { +func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, onlyBaseImage bool, err error) { stage := stages[stageIndex] ib := stage.Builder node := stage.Node base, err := ib.From(node) if err != nil { logrus.Debugf("buildStage(node.Children=%#v)", node.Children) - return "", nil, err + return "", nil, false, err } // If this is the last stage, then the image that we produce at @@ -470,33 +487,33 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE output := "" if stageIndex == len(stages)-1 { output = b.output - } - // Check if any labels were passed in via the API, and add a final line - // to the Dockerfile that would provide the same result. - // Reason: Docker adds label modification as a last step which can be - // processed like regular steps, and if no modification is done to - // layers, its easier to re-use cached layers. - if len(b.labels) > 0 { - var labelLine string - labels := append([]string{}, b.labels...) - for _, labelSpec := range labels { - label := strings.SplitN(labelSpec, "=", 2) - key := label[0] - value := "" - if len(label) > 1 { - value = label[1] - } - // check only for an empty key since docker allows empty values - if key != "" { - labelLine += fmt.Sprintf(" %q=%q", key, value) + // Check if any labels were passed in via the API, and add a final line + // to the Dockerfile that would provide the same result. + // Reason: Docker adds label modification as a last step which can be + // processed like regular steps, and if no modification is done to + // layers, its easier to re-use cached layers. 
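The global-args pass above records every ARG that appears before the first FROM, provided it carries a default value, and warnOnUnsetBuildArgs below consults that map so such args stop triggering the missing-build-argument warning; per the comment in the hunk, they are treated as visible to all stages. An illustrative Containerfile, kept as a Go raw string so all examples stay in one language:

```go
// BASE is declared before the first FROM with a default, so the new
// globalArgs map records it and no "missing build argument" warning fires.
const globalArgExample = `
ARG BASE=alpine:3.19
FROM ${BASE} AS build
RUN echo "building from ${BASE}"
`
```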
+ if len(b.labels) > 0 { + var labelLine string + labels := append([]string{}, b.labels...) + for _, labelSpec := range labels { + label := strings.SplitN(labelSpec, "=", 2) + key := label[0] + value := "" + if len(label) > 1 { + value = label[1] + } + // check only for an empty key since docker allows empty values + if key != "" { + labelLine += fmt.Sprintf(" %q=%q", key, value) + } } - } - if len(labelLine) > 0 { - additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n")) - if err != nil { - return "", nil, fmt.Errorf("while adding additional LABEL step: %w", err) + if len(labelLine) > 0 { + additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n")) + if err != nil { + return "", nil, false, fmt.Errorf("while adding additional LABEL step: %w", err) + } + stage.Node.Children = append(stage.Node.Children, additionalNode.Children...) } - stage.Node.Children = append(stage.Node.Children, additionalNode.Children...) } } @@ -512,13 +529,13 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE value := env[1] envLine += fmt.Sprintf(" %q=%q", key, value) } else { - return "", nil, fmt.Errorf("BUG: unresolved environment variable: %q", key) + return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key) } } if len(envLine) > 0 { additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("ENV" + envLine + "\n")) if err != nil { - return "", nil, fmt.Errorf("while adding additional ENV step: %w", err) + return "", nil, false, fmt.Errorf("while adding additional ENV step: %w", err) } // make this the first instruction in the stage after its FROM instruction stage.Node.Children = append(additionalNode.Children, stage.Node.Children...) @@ -559,8 +576,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE } // Build this stage. - if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil { - return "", nil, err + if imageID, ref, onlyBaseImage, err = stageExecutor.Execute(ctx, base); err != nil { + return "", nil, onlyBaseImage, err } // The stage succeeded, so remove its build container if we're @@ -573,7 +590,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE b.stagesLock.Unlock() } - return imageID, ref, nil + return imageID, ref, onlyBaseImage, nil } type stageDependencyInfo struct { @@ -624,6 +641,9 @@ func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMa if _, isBuiltIn := builtinAllowedBuildArgs[argName]; isBuiltIn { shouldWarn = false } + if _, isGlobalArg := b.globalArgs[argName]; isGlobalArg { + shouldWarn = false + } if shouldWarn { b.logger.Warnf("missing %q build argument. 
Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=", argName)) } @@ -742,7 +762,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image b.fromOverride = "" } base := child.Next.Value - if base != "scratch" { + if base != "" && base != buildah.BaseImageFakeName { if replaceBuildContext, ok := b.additionalBuildContexts[child.Next.Value]; ok { if replaceBuildContext.IsImage { child.Next.Value = replaceBuildContext.Value @@ -862,10 +882,11 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image b.warnOnUnsetBuildArgs(stages, dependencyMap, b.args) type Result struct { - Index int - ImageID string - Ref reference.Canonical - Error error + Index int + ImageID string + OnlyBaseImage bool + Ref reference.Canonical + Error error } ch := make(chan Result, len(stages)) @@ -925,21 +946,23 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image return } } - stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index) + stageID, stageRef, stageOnlyBaseImage, stageErr := b.buildStage(ctx, cleanupStages, stages, index) if stageErr != nil { cancel = true ch <- Result{ - Index: index, - Error: stageErr, + Index: index, + Error: stageErr, + OnlyBaseImage: stageOnlyBaseImage, } return } ch <- Result{ - Index: index, - ImageID: stageID, - Ref: stageRef, - Error: nil, + Index: index, + ImageID: stageID, + Ref: stageRef, + OnlyBaseImage: stageOnlyBaseImage, + Error: nil, } }() } @@ -969,7 +992,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // We're not populating the cache with intermediate // images, so add this one to the list of images that // we'll remove later. - if !b.layers { + // Only remove intermediate image is `--layers` is not provided + // or following stage was not only a base image ( i.e a different image ). + if !b.layers && !r.OnlyBaseImage { cleanupImages = append(cleanupImages, r.ImageID) } } @@ -992,8 +1017,8 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // Add additional tags and print image names recorded in storage if dest, err := b.resolveNameToImageRef(b.output); err == nil { switch dest.Transport().Name() { - case is.Transport.Name(): - img, err := is.Transport.GetStoreImage(b.store, dest) + case storageTransport.Transport.Name(): + _, img, err := storageTransport.ResolveReference(dest) if err != nil { return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err) } @@ -1004,7 +1029,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) } // Report back the caller the tags applied, if any. 
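The hunk below makes the same swap in the executor that commit.go made earlier in this patch: is.Transport.GetStoreImage plus a follow-up ParseStoreReference("@"+imgID) becomes a single storageTransport.ResolveReference call, which looks the image up and hands back a reference already pinned to its ID. The new shape, as a standalone sketch:

```go
package sketch

import (
	"fmt"

	storageTransport "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
)

// resolveJustWritten resolves a storage reference to the image it names,
// returning an ID-pinned reference alongside the image record.
func resolveJustWritten(dest types.ImageReference) (types.ImageReference, *storage.Image, error) {
	resolved, img, err := storageTransport.ResolveReference(dest)
	if err != nil {
		return nil, nil, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
	}
	return resolved, img, nil
}
```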
- img, err = is.Transport.GetStoreImage(b.store, dest) + _, img, err = storageTransport.ResolveReference(dest) if err != nil { return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err) } diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 7f1933e3..9398dcef 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "path" "path/filepath" "sort" "strconv" @@ -18,6 +19,7 @@ import ( "github.com/containers/buildah/define" buildahdocker "github.com/containers/buildah/docker" "github.com/containers/buildah/internal" + "github.com/containers/buildah/internal/tmpdir" internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/rusage" @@ -34,6 +36,7 @@ import ( "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/unshare" docker "github.com/fsouza/go-dockerclient" + buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" @@ -347,12 +350,12 @@ func (s *StageExecutor) volumeCacheRestore() error { // imagebuilder tells us the instruction was "ADD" and not "COPY". func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { s.builder.ContentDigester.Restart() + return s.performCopy(excludes, copies...) +} + +func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Copy) error { + copiesExtend := []imagebuilder.Copy{} for _, copy := range copies { - if copy.Download { - logrus.Debugf("ADD %#v, %#v", excludes, copy) - } else { - logrus.Debugf("COPY %#v, %#v", excludes, copy) - } if err := s.volumeCacheInvalidate(copy.Dest); err != nil { return err } @@ -366,7 +369,61 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err stripSetgid := false preserveOwnership := false contextDir := s.executor.contextDir - if len(copy.From) > 0 { + // If we are copying files via heredoc syntax, then + // its time to create these temporary files on host + // and copy these to container + if len(copy.Files) > 0 { + // If we are copying files from heredoc syntax, there + // maybe regular files from context as well so split and + // process them differently + if len(copy.Src) > len(copy.Files) { + regularSources := []string{} + for _, src := range copy.Src { + // If this source is not a heredoc, then it is a regular file from + // build context or from another stage (`--from=`) so treat this differently. + if !strings.HasPrefix(src, "<<") { + regularSources = append(regularSources, src) + } + } + copyEntry := copy + // Remove heredoc if any, since we are already processing them + // so create new entry with sources containing regular files + // only, since regular files can have different context then + // heredoc files. 
+ copyEntry.Files = nil + copyEntry.Src = regularSources + copiesExtend = append(copiesExtend, copyEntry) + } + copySources := []string{} + for _, file := range copy.Files { + data := file.Data + // remove first break line added while parsing heredoc + data = strings.TrimPrefix(data, "\n") + // add breakline when heredoc ends for docker compat + data = data + "\n" + tmpFile, err := os.Create(filepath.Join(parse.GetTempDir(), path.Base(filepath.ToSlash(file.Name)))) + if err != nil { + return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err) + } + err = tmpFile.Chmod(0644) // 644 is consistent with buildkit + if err != nil { + tmpFile.Close() + return fmt.Errorf("unable to chmod tmp file created for COPY instruction at %q: %w", tmpFile.Name(), err) + } + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.WriteString(data) + if err != nil { + tmpFile.Close() + return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err) + } + copySources = append(copySources, filepath.Base(tmpFile.Name())) + tmpFile.Close() + } + contextDir = parse.GetTempDir() + copy.Src = copySources + } + + if len(copy.From) > 0 && len(copy.Files) == 0 { // If from has an argument within it, resolve it to its // value. Otherwise just return the value found. from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments()) @@ -401,7 +458,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err // additional context contains a tar file // so download and explode tar to buildah // temp and point context to that. - path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) if err != nil { return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) } @@ -412,6 +469,16 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err } else { contextDir = additionalBuildContext.DownloadedCache } + } else { + // This points to a path on the filesystem + // Check to see if there's a .containerignore + // file, update excludes for this stage before + // proceeding + buildContextExcludes, _, err := parse.ContainerIgnoreFile(additionalBuildContext.Value, "", nil) + if err != nil { + return err + } + excludes = append(excludes, buildContextExcludes...) } } else { copy.From = additionalBuildContext.Value @@ -446,6 +513,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err stripSetuid = true // did this change between 18.06 and 19.03? stripSetgid = true // did this change between 18.06 and 19.03? } + if copy.Download { + logrus.Debugf("ADD %#v, %#v", excludes, copy) + } else { + logrus.Debugf("COPY %#v, %#v", excludes, copy) + } for _, src := range copy.Src { if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { // Source is a URL, allowed for ADD but not COPY. 
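The heredoc COPY handling above covers statements that mix inline files with ordinary sources, which is why the split logic exists: each heredoc body is written to a temp file (chmod 0644, for buildkit compatibility) and copied from there, while the remaining sources go through the regular build context. An illustrative Containerfile as a Go raw string; note that the heredoc delimiter doubles as the name of the file that gets created:

```go
// "config.yaml" both terminates the heredoc body and names the file created
// under /app/; entrypoint.sh still comes from the ordinary build context.
const heredocCopyExample = `
FROM alpine
COPY <<config.yaml entrypoint.sh /app/
debug: false
config.yaml
`
```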
@@ -462,6 +534,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err options := buildah.AddAndCopyOptions{ Chmod: copy.Chmod, Chown: copy.Chown, + Checksum: copy.Checksum, PreserveOwnership: preserveOwnership, ContextDir: contextDir, Excludes: copyExcludes, @@ -474,6 +547,13 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err return err } } + if len(copiesExtend) > 0 { + // If we found heredocs and regularfiles together + // in same statement then we produced new copies to + // process regular files separately since they need + // different context. + return s.performCopy(excludes, copiesExtend...) + } return nil } @@ -537,7 +617,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte // additional context contains a tar file // so download and explode tar to buildah // temp and point context to that. - path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) if err != nil { return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) } @@ -579,10 +659,59 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte return stageMountPoints, nil } +func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.File) ([]Mount, error) { + mountResult := []Mount{} + for _, file := range files { + f, err := os.CreateTemp(parse.GetTempDir(), "buildahheredoc") + if err != nil { + return nil, err + } + if _, err := f.WriteString(file.Data); err != nil { + f.Close() + return nil, err + } + err = f.Chmod(0755) + if err != nil { + f.Close() + return nil, err + } + // dest path is same as buildkit for compat + dest := filepath.Join("/dev/pipes/", filepath.Base(f.Name())) + mount := Mount{Destination: dest, Type: define.TypeBind, Source: f.Name(), Options: append(define.BindOptions, "rprivate", "z", "Z")} + mountResult = append(mountResult, mount) + f.Close() + } + return mountResult, nil +} + // Run executes a RUN instruction using the stage's current working container // as a root directory. func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { logrus.Debugf("RUN %#v, %#v", run, config) + args := run.Args + heredocMounts := []Mount{} + if len(run.Files) > 0 { + if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil { + if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") { + // This is a single heredoc with a shebang, so create a file + // and run it. + heredocMount, err := s.createNeededHeredocMountsForRun(run.Files) + if err != nil { + return err + } + args = []string{heredocMount[0].Destination} + heredocMounts = append(heredocMounts, heredocMount...) 
+ } else { + args = []string{run.Files[0].Data} + } + } else { + full := args[0] + for _, file := range run.Files { + full += file.Data + "\n" + file.Name + } + args = []string{full} + } + } stageMountPoints, err := s.runStageMountPoints(run.Mounts) if err != nil { return err @@ -611,6 +740,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { Logger: s.executor.logger, Mounts: append([]Mount{}, s.executor.transientMounts...), NamespaceOptions: namespaceOptions, + NoHostname: s.executor.noHostname, NoHosts: s.executor.noHosts, NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "", Quiet: s.executor.quiet, @@ -645,7 +775,6 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { options.ConfigureNetwork = buildah.NetworkDisabled } - args := run.Args if run.Shell { if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest { args = append(config.Shell, args...) @@ -658,6 +787,9 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { return err } options.Mounts = append(options.Mounts, mounts...) + if len(heredocMounts) > 0 { + options.Mounts = append(options.Mounts, heredocMounts...) + } err = s.builder.Run(args, options) if err2 := s.volumeCacheRestore(); err2 != nil { if err == nil { @@ -821,7 +953,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo } } dImage := docker.Image{ - Parent: builder.FromImage, + Parent: builder.FromImageID, ContainerConfig: dConfig, Container: builder.Container, Author: builder.Maintainer(), @@ -904,13 +1036,14 @@ func (s *StageExecutor) getContentSummaryAfterAddingContent() string { } // Execute runs each of the steps in the stage's parsed tree, in turn. -func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) { +func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, onlyBaseImg bool, err error) { var resourceUsage rusage.Rusage stage := s.stage ib := stage.Builder checkForLayers := s.executor.layers && s.executor.useCache moreStages := s.index < len(s.stages)-1 lastStage := !moreStages + onlyBaseImage := false imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)]) rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)]) @@ -923,7 +1056,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // either in local storage, or one that we have to pull from a // registry, subject to the passed-in pull policy. if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil { - return "", nil, err + return "", nil, false, err } pullPolicy := s.executor.pullPolicy s.executor.stagesLock.Lock() @@ -953,7 +1086,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Start counting resource usage before we potentially pull a base image. if rusage.Supported() { if resourceUsage, err = rusage.Get(); err != nil { - return "", nil, err + return "", nil, false, err } // Log the final incremental resource usage counter before we return. defer logRusage() @@ -963,7 +1096,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // the imagebuilder configuration may alter the list of steps we have, // so take a snapshot of them *after* that. 
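The RUN heredoc support shown above distinguishes two cases: a body that starts with a shebang is written to a file, bind-mounted under /dev/pipes/ (the destination buildkit uses), and executed directly, while a plain body is handed to the shell. Both, illustrated as a Go raw string:

```go
const runHeredocExample = `
FROM alpine
RUN <<EOF
#!/bin/sh
echo "executed directly via a bind mount under /dev/pipes/"
EOF
RUN <<EOF
echo "no shebang, so this body goes through the default shell"
EOF
`
```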
if _, err := s.prepare(ctx, base, true, true, preserveBaseImageAnnotationsAtStageStart, pullPolicy); err != nil { - return "", nil, err + return "", nil, false, err } children := stage.Node.Children @@ -1021,56 +1154,40 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput) buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput) if err != nil { - return "", nil, fmt.Errorf("failed to parse build output: %w", err) + return "", nil, false, fmt.Errorf("failed to parse build output: %w", err) } } if len(children) == 0 { // There are no steps. - if s.builder.FromImageID == "" || s.executor.squash { + if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 { // We either don't have a base image, or we need to - // squash the contents of the base image. Whichever is - // the case, we need to commit() to create a new image. - logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil { - return "", nil, fmt.Errorf("committing base container: %w", err) - } - // Generate build output if needed. - if canGenerateBuildOutput { - if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err - } - } - } else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 { - // The image would be modified by the labels passed - // via the command line, so we need to commit. + // transform the contents of the base image, or we need + // to make some changes to just the config blob. Whichever + // is the case, we need to commit() to create a new image. logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash); err != nil { - return "", nil, err - } - // Generate build output if needed. - if canGenerateBuildOutput { - if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err - } + // No base image means there's nothing to put in a + // layer, so don't create one. + emptyLayer := (s.builder.FromImageID == "") + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash, lastStage); err != nil { + return "", nil, false, fmt.Errorf("committing base container: %w", err) } } else { - // We don't need to squash the base image, and the - // image wouldn't be modified by the command line - // options, so just reuse the base image. + // We don't need to squash or otherwise transform the + // base image, and the image wouldn't be modified by + // the command line options, so just reuse the base + // image. logCommit(s.output, -1) if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil { - return "", nil, err + return "", nil, onlyBaseImage, err } - // If we have reached this point then our build is just performing a tag - // and it contains no steps or instructions (i.e Containerfile only contains - // `FROM and nothing else so we will never end up committing this - // but instead just re-tag image. For such use-cases if `-o` or `--output` was - // specified honor that and export the contents of the current build anyways. 
- if canGenerateBuildOutput { - if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err - } + onlyBaseImage = true + } + // Generate build output from the new image, or the preexisting + // one if we didn't actually do anything, if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, onlyBaseImage, err } } logImageID(imgID) @@ -1083,7 +1200,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Resolve any arguments in this instruction. step := ib.Step() if err := step.Resolve(node); err != nil { - return "", nil, fmt.Errorf("resolving step %+v: %w", *node, err) + return "", nil, false, fmt.Errorf("resolving step %+v: %w", *node, err) } logrus.Debugf("Parsed Step: %+v", *step) if !s.executor.quiet { @@ -1096,21 +1213,21 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, command := strings.ToUpper(step.Command) // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from=' if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") { - return "", nil, fmt.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") + return "", nil, false, fmt.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") } - if command == "ADD" && (flag == "--chmod" || flag == "--chown") { - return "", nil, fmt.Errorf("ADD only supports the --chmod= and the --chown= flags") + if command == "ADD" && (flag == "--chmod" || flag == "--chown" || flag == "--checksum") { + return "", nil, false, fmt.Errorf("ADD only supports the --chmod=, --chown=, and --checksum= flags") } if strings.Contains(flag, "--from") && command == "COPY" { arr := strings.Split(flag, "=") if len(arr) != 2 { - return "", nil, fmt.Errorf("%s: invalid --from flag, should be --from=", command) + return "", nil, false, fmt.Errorf("%s: invalid --from flag %q, should be --from=", command, flag) } // If arr[1] has an argument within it, resolve it to its // value. Otherwise just return the value found. from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments()) if fromErr != nil { - return "", nil, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr) + return "", nil, false, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr) } // Before looking into additional context @@ -1133,7 +1250,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // replace with image set in build context from = additionalBuildContext.Value if _, err := s.getImageRootfs(ctx, from); err != nil { - return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) + return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) } break } @@ -1143,12 +1260,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // result of an earlier stage, wait for that // stage to finish being built. 
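The relaxed flag validation above now admits ADD --checksum, which the Copy handler earlier in this file forwards as options.Checksum so a remote source can be pinned to an expected digest. A sketch; the URL and digest are placeholders:

```go
// The digest must be the real sha256 of the fetched content; a mismatch
// fails the build. <expected-digest> is a placeholder here.
const addChecksumExample = `
FROM alpine
ADD --checksum=sha256:<expected-digest> https://example.com/app.tar.gz /opt/
`
```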
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil { - return "", nil, err + return "", nil, false, err } if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index { break } else if _, err = s.getImageRootfs(ctx, from); err != nil { - return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) + return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) } break } @@ -1170,7 +1287,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, err := ib.Run(step, s, noRunsRemaining) if err != nil { logrus.Debugf("Error building at step %+v: %v", *step, err) - return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) + return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) } // In case we added content, retrieve its digest. addedContentSummary := s.getContentSummaryAfterAddingContent() @@ -1193,15 +1310,15 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // stage. if lastStage || imageIsUsedLater { logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction) if err != nil { - return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err) + return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err) } logImageID(imgID) // Generate build output if needed. if canGenerateBuildOutput { if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err + return "", nil, false, err } } } else { @@ -1233,7 +1350,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, for _, a := range node.Flags { arg, err := imagebuilder.ProcessWord(a, s.stage.Builder.Arguments()) if err != nil { - return "", nil, err + return "", nil, false, err } switch { case strings.HasPrefix(arg, "--mount="): @@ -1245,7 +1362,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } stageMountPoints, err := s.runStageMountPoints(mounts) if err != nil { - return "", nil, err + return "", nil, false, err } for _, mountPoint := range stageMountPoints { if mountPoint.DidExecute { @@ -1267,7 +1384,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if needsCacheKey { cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("failed while generating cache key: %w", err) + return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err) } } // Check if there's already an image based on our parent that @@ -1287,7 +1404,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, s.didExecute = true if err = ib.Run(step, s, noRunsRemaining); err != nil { logrus.Debugf("Error building at step %+v: %v", *step, err) - return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) + return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) } // Retrieve the digest info for the content that we just copied // into the rootfs. 
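With the cache lookups above threaded through generateCacheKey, intermediateImageExists, and the --cache-from fallback, the overall idea is compact: a step is reusable when an image already exists whose recorded provenance matches the parent image, the step text, and the digest of whatever content the step added. The sketch below conveys only that shape; buildah's real key covers more state, and the names here are assumptions, not its API:

```go
package sketch

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// cacheKeyFor is illustrative only: same parent, same step, same added
// content yields the same key, making the image a cache-hit candidate.
func cacheKeyFor(parentImageID, stepMessage, addedContentDigest string) string {
	h := sha256.New()
	fmt.Fprintf(h, "%s\x00%s\x00%s", parentImageID, stepMessage, addedContentDigest)
	return hex.EncodeToString(h.Sum(nil))
}
```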
@@ -1296,13 +1413,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if needsCacheKey { cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("failed while generating cache key: %w", err) + return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err) } } } cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err) + return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } // All the best effort to find cache on localstorage have failed try pulling // cache from remote repo if `--cache-from` was configured. @@ -1314,7 +1431,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, logCachePulled(cacheKey, ref) cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err) + return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } if cacheID != "" { pulledAndUsedCacheImage = true @@ -1334,7 +1451,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, s.didExecute = true if err = ib.Run(step, s, noRunsRemaining); err != nil { logrus.Debugf("Error building at step %+v: %v", *step, err) - return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) + return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) } // In case we added content, retrieve its digest. 
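The cache plumbing repeated through these hunks always follows the same two-tier pattern: generate a cache key, look for a matching intermediate image in local storage, and only on a miss try pulling from the `--cache-from` repository before checking locally again. A minimal sketch of that order, with hypothetical stand-ins for `intermediateImageExists` and the pull path:

```go
package main

import "fmt"

// Hypothetical stand-ins for local storage and a --cache-from repository.
var (
	localCache  = map[string]string{"key-a": "sha256:aaaa"}
	remoteCache = map[string]string{"key-b": "sha256:bbbb"}
)

func localCacheHit(key string) string { return localCache[key] }

// tryPullCache pretends to pull a cache image, making it visible locally;
// pull failures are treated as plain cache misses, as in the code above.
func tryPullCache(key string) bool {
	if id, ok := remoteCache[key]; ok {
		localCache[key] = id
		return true
	}
	return false
}

// lookupCache condenses the lookup order; the boolean mirrors the
// pulledAndUsedCacheImage flag above.
func lookupCache(key string) (cacheID string, pulled bool) {
	if id := localCacheHit(key); id != "" {
		return id, false
	}
	if tryPullCache(key) {
		if id := localCacheHit(key); id != "" {
			return id, true
		}
	}
	return "", false // full miss; the step has to be executed
}

func main() {
	fmt.Println(lookupCache("key-a")) // sha256:aaaa false
	fmt.Println(lookupCache("key-b")) // sha256:bbbb true
	fmt.Println(lookupCache("key-c")) // (empty) false
}
```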
@@ -1343,7 +1460,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if needsCacheKey { cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("failed while generating cache key: %w", err) + return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err) } } @@ -1352,7 +1469,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if checkForLayers && !avoidLookingCache { cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err) + return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } // All the best effort to find cache on localstorage have failed try pulling // cache from remote repo if `--cache-from` was configured and cacheKey was @@ -1365,7 +1482,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, logCachePulled(cacheKey, ref) cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err) + return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } if cacheID != "" { pulledAndUsedCacheImage = true @@ -1389,13 +1506,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, err := ib.Run(step, s, noRunsRemaining) if err != nil { logrus.Debugf("Error building at step %+v: %v", *step, err) - return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) + return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err) } } } // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found. - // So only perform commit if its the lastInstruction of lastStage. + // So only perform commit if it's the lastInstruction of lastStage. if cacheID != "" { logCacheHit(cacheID) // A suitable cached image was found, so we can just @@ -1406,7 +1523,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if commitName != "" { logCommit(commitName, i) if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil { - return "", nil, err + return "", nil, false, err } } } else { @@ -1419,15 +1536,15 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // While committing we always set squash to false here // because at this point we want to save history for // layers even if its a squashed build so that they - // can be part of build-cache. - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false) + // can be part of the build cache. + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false, lastStage && lastInstruction) if err != nil { - return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err) + return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err) } // Generate build output if needed. 
if canGenerateBuildOutput { if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err + return "", nil, false, err } } } @@ -1445,23 +1562,23 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if len(s.executor.cacheTo) != 0 && (!pulledAndUsedCacheImage || cacheID == "") && needsCacheKey { logCachePush(cacheKey) if err = s.pushCache(ctx, imgID, cacheKey); err != nil { - return "", nil, err + return "", nil, false, err } } if lastInstruction && lastStage { - if s.executor.squash { + if s.executor.squash || s.executor.confidentialWorkload.Convert { // Create a squashed version of this image // if we're supposed to create one and this // is the last instruction of the last stage. - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction) if err != nil { - return "", nil, fmt.Errorf("committing final squash step %+v: %w", *step, err) + return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err) } // Generate build output if needed. if canGenerateBuildOutput { if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err + return "", nil, false, err } } } else if cacheID != "" { @@ -1476,7 +1593,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Generate build output if needed. if canGenerateBuildOutput { if err := s.generateBuildOutput(buildOutputOption); err != nil { - return "", nil, err + return "", nil, false, err } } } @@ -1507,11 +1624,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // ID that we really should not be pulling anymore (see // containers/podman/issues/10307). if _, err := s.prepare(ctx, imgID, false, true, true, define.PullNever); err != nil { - return "", nil, fmt.Errorf("preparing container for next step: %w", err) + return "", nil, false, fmt.Errorf("preparing container for next step: %w", err) } } } - return imgID, ref, nil + + return imgID, ref, onlyBaseImage, nil } func historyEntriesEqual(base, derived v1.History) bool { @@ -1732,7 +1850,7 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st if err != nil { return "", nil, fmt.Errorf("computing digest of manifest for image %q: %w", cacheID, err) } - img, err := is.Transport.GetStoreImage(s.executor.store, dest) + _, img, err := is.ResolveReference(dest) if err != nil { return "", nil, fmt.Errorf("locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err) } @@ -1941,7 +2059,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p // commit writes the container's contents to an image, using a passed-in tag as // the name if there is one, generating a unique ID-based one otherwise. // or commit via any custom exporter if specified. 
-func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash bool) (string, reference.Canonical, error) { +func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash, finalInstruction bool) (string, reference.Canonical, error) { ib := s.stage.Builder var imageRef types.ImageReference if output != "" { @@ -2016,12 +2134,28 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } s.builder.ClearLabels() + if output == "" { + // If output is not set then we are committing + // an intermediate image, in such case we must + // honor layer labels if they are configured. + for _, labelString := range s.executor.layerLabels { + label := strings.SplitN(labelString, "=", 2) + if len(label) > 1 { + s.builder.SetLabel(label[0], label[1]) + } else { + s.builder.SetLabel(label[0], "") + } + } + } for k, v := range config.Labels { s.builder.SetLabel(k, v) } if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue { s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) } + for _, key := range s.executor.unsetLabels { + s.builder.UnsetLabel(key) + } for _, annotationSpec := range s.executor.annotations { annotation := strings.SplitN(annotationSpec, "=", 2) if len(annotation) > 1 { @@ -2056,6 +2190,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, } + if finalInstruction { + options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload + } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { return "", nil, err diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 9f925a1d..88f732ab 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -107,6 +107,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system GIDMap: gidmap, }, NetworkInterface: netInt, + CommonBuildOpts: &CommonBuildOptions{}, } if err := builder.initConfig(ctx, image, systemContext); err != nil { diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 7dae3030..6ad735ae 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -23,11 +23,11 @@ sudo yum -y install buildah #### [Debian](https://debian.org) The buildah package is available in -the [Bullseye](https://packages.debian.org/bullseye/buildah), which -is the current stable release (Debian 11), as well as Debian Unstable/Sid. +the [Bookworm](https://packages.debian.org/bookworm/buildah), which +is the current stable release (Debian 12), as well as Debian Unstable/Sid. 
```bash
-# Debian Stable/Bullseye or Unstable/Sid
+# Debian Stable/Bookworm or Unstable/Sid
sudo apt-get update
sudo apt-get -y install buildah
```
@@ -161,7 +161,6 @@ Prior to installing Buildah, install the following packages on your Linux distro
* bats
* btrfs-progs-devel
* bzip2
-* device-mapper-devel
* git
* go-md2man
* gpgme-devel
@@ -181,7 +180,6 @@ In Fedora, you can use this command:
 golang \
 bats \
 btrfs-progs-devel \
- device-mapper-devel \
 glib2-devel \
 gpgme-devel \
 libassuan-devel \
@@ -216,7 +214,6 @@ In RHEL and CentOS, run this command to install the build dependencies:
 golang \
 bats \
 btrfs-progs-devel \
- device-mapper-devel \
 glib2-devel \
 gpgme-devel \
 libassuan-devel \
@@ -242,7 +239,6 @@ On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
 bzip2 \
 libgpgme-devel \
 libseccomp-devel \
- device-mapper-devel \
 libbtrfs-devel \
 go-md2man
```
@@ -250,43 +246,25 @@ On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above.

-### Ubuntu
+### Ubuntu/Debian

-In Ubuntu jammy you can use these commands:
+In Ubuntu 22.10 (Kinetic) or Debian 12 (Bookworm) you can use these commands:

```
 sudo apt-get -y -qq update
- sudo apt-get -y install bats btrfs-progs git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo go-md2man make
- sudo apt-get -y install golang-1.18
+ sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo
```
-Then to install Buildah on Ubuntu follow the steps in this example:
+
+Then to install Buildah follow the steps in this example:
```
- mkdir ~/buildah
- cd ~/buildah
- export GOPATH=`pwd`
- git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
- cd ./src/github.com/containers/buildah
- PATH=/usr/lib/go-1.18/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
+ git clone https://github.com/containers/buildah
+ cd buildah
+ make runc all SECURITYTAGS="apparmor seccomp"
 sudo make install install.runc
 buildah --help
```

-### Debian
-
-To install the required dependencies, you can use those commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
-
-```
-gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
-sudo gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
-sudo echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
-sudo apt update
-sudo apt -y install -t stretch-backports golang
-sudo apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
-```
-
-The build steps on Debian are otherwise the same as Ubuntu, above.
-
 ## Vendoring - Dependency Management

 This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.

@@ -319,7 +297,7 @@ cat /etc/containers/registries.conf
# and 'registries.block'.
[registries.search] -registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com', 'registry.centos.org'] +registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com'] # If you need to access insecure registries, add the registry's fully-qualified name. # An insecure registry is one that does not have a valid SSL certificate or only does HTTP. diff --git a/vendor/github.com/containers/buildah/internal/config/convert.go b/vendor/github.com/containers/buildah/internal/config/convert.go new file mode 100644 index 00000000..7287c671 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/config/convert.go @@ -0,0 +1,121 @@ +package config + +import ( + "github.com/containers/image/v5/manifest" + dockerclient "github.com/fsouza/go-dockerclient" +) + +// Schema2ConfigFromGoDockerclientConfig converts a go-dockerclient Config +// structure to a manifest Schema2Config. +func Schema2ConfigFromGoDockerclientConfig(config *dockerclient.Config) *manifest.Schema2Config { + overrideExposedPorts := make(map[manifest.Schema2Port]struct{}) + for port := range config.ExposedPorts { + overrideExposedPorts[manifest.Schema2Port(port)] = struct{}{} + } + var overrideHealthCheck *manifest.Schema2HealthConfig + if config.Healthcheck != nil { + overrideHealthCheck = &manifest.Schema2HealthConfig{ + Test: config.Healthcheck.Test, + StartPeriod: config.Healthcheck.StartPeriod, + Interval: config.Healthcheck.Interval, + Timeout: config.Healthcheck.Timeout, + Retries: config.Healthcheck.Retries, + } + } + labels := make(map[string]string) + for k, v := range config.Labels { + labels[k] = v + } + volumes := make(map[string]struct{}) + for v := range config.Volumes { + volumes[v] = struct{}{} + } + s2config := &manifest.Schema2Config{ + Hostname: config.Hostname, + Domainname: config.Domainname, + User: config.User, + AttachStdin: config.AttachStdin, + AttachStdout: config.AttachStdout, + AttachStderr: config.AttachStderr, + ExposedPorts: overrideExposedPorts, + Tty: config.Tty, + OpenStdin: config.OpenStdin, + StdinOnce: config.StdinOnce, + Env: append([]string{}, config.Env...), + Cmd: append([]string{}, config.Cmd...), + Healthcheck: overrideHealthCheck, + ArgsEscaped: config.ArgsEscaped, + Image: config.Image, + Volumes: volumes, + WorkingDir: config.WorkingDir, + Entrypoint: append([]string{}, config.Entrypoint...), + NetworkDisabled: config.NetworkDisabled, + MacAddress: config.MacAddress, + OnBuild: append([]string{}, config.OnBuild...), + Labels: labels, + StopSignal: config.StopSignal, + Shell: config.Shell, + } + if config.StopTimeout != 0 { + s2config.StopTimeout = &config.StopTimeout + } + return s2config +} + +// GoDockerclientConfigFromSchema2Config converts a manifest Schema2Config +// to a go-dockerclient config structure. 
+func GoDockerclientConfigFromSchema2Config(s2config *manifest.Schema2Config) *dockerclient.Config { + overrideExposedPorts := make(map[dockerclient.Port]struct{}) + for port := range s2config.ExposedPorts { + overrideExposedPorts[dockerclient.Port(port)] = struct{}{} + } + var healthCheck *dockerclient.HealthConfig + if s2config.Healthcheck != nil { + healthCheck = &dockerclient.HealthConfig{ + Test: s2config.Healthcheck.Test, + StartPeriod: s2config.Healthcheck.StartPeriod, + Interval: s2config.Healthcheck.Interval, + Timeout: s2config.Healthcheck.Timeout, + Retries: s2config.Healthcheck.Retries, + } + } + labels := make(map[string]string) + for k, v := range s2config.Labels { + labels[k] = v + } + volumes := make(map[string]struct{}) + for v := range s2config.Volumes { + volumes[v] = struct{}{} + } + config := &dockerclient.Config{ + Hostname: s2config.Hostname, + Domainname: s2config.Domainname, + User: s2config.User, + AttachStdin: s2config.AttachStdin, + AttachStdout: s2config.AttachStdout, + AttachStderr: s2config.AttachStderr, + PortSpecs: nil, + ExposedPorts: overrideExposedPorts, + Tty: s2config.Tty, + OpenStdin: s2config.OpenStdin, + StdinOnce: s2config.StdinOnce, + Env: append([]string{}, s2config.Env...), + Cmd: append([]string{}, s2config.Cmd...), + Healthcheck: healthCheck, + ArgsEscaped: s2config.ArgsEscaped, + Image: s2config.Image, + Volumes: volumes, + WorkingDir: s2config.WorkingDir, + Entrypoint: append([]string{}, s2config.Entrypoint...), + NetworkDisabled: s2config.NetworkDisabled, + MacAddress: s2config.MacAddress, + OnBuild: append([]string{}, s2config.OnBuild...), + Labels: labels, + StopSignal: s2config.StopSignal, + Shell: s2config.Shell, + } + if s2config.StopTimeout != nil { + config.StopTimeout = *s2config.StopTimeout + } + return config +} diff --git a/vendor/github.com/containers/buildah/internal/config/executor.go b/vendor/github.com/containers/buildah/internal/config/executor.go new file mode 100644 index 00000000..19b1429b --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/config/executor.go @@ -0,0 +1,45 @@ +package config + +import ( + "errors" + "fmt" + "os" + + dockerclient "github.com/fsouza/go-dockerclient" + "github.com/openshift/imagebuilder" +) + +// configOnlyExecutor implements the Executor interface that an +// imagebuilder.Builder expects to be able to call to do some heavy lifting, +// but it just refuses to do the work of ADD, COPY, or RUN. It also doesn't +// care if the working directory exists in a container, because it's really +// only concerned with letting the Builder's RunConfig get updated by changes +// from a Dockerfile. Try anything more than that and it'll return an error. 
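+//
+// A hedged sketch of the intended call pattern (it mirrors what Override in
+// override.go below does): parse the change set with
+// imagebuilder.ParseDockerfile, resolve each child node into an
+// imagebuilder.Step, feed it to builder.Run(&step, &configOnlyExecutor{}, true),
+// and then read the accumulated settings back out of builder.RunConfig.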
+type configOnlyExecutor struct{} + +func (g *configOnlyExecutor) Preserve(path string) error { + return errors.New("ADD/COPY/RUN not supported as changes") +} + +func (g *configOnlyExecutor) EnsureContainerPath(path string) error { + return nil +} + +func (g *configOnlyExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return nil +} + +func (g *configOnlyExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { + if len(copies) == 0 { + return nil + } + return errors.New("ADD/COPY not supported as changes") +} + +func (g *configOnlyExecutor) Run(run imagebuilder.Run, config dockerclient.Config) error { + return errors.New("RUN not supported as changes") +} + +func (g *configOnlyExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { + return fmt.Errorf("did not understand change instruction %q", step.Original) +} diff --git a/vendor/github.com/containers/buildah/internal/config/override.go b/vendor/github.com/containers/buildah/internal/config/override.go new file mode 100644 index 00000000..a1dfebf6 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/config/override.go @@ -0,0 +1,181 @@ +package config + +import ( + "fmt" + "os" + "strings" + + "github.com/containers/buildah/docker" + "github.com/containers/image/v5/manifest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/openshift/imagebuilder" +) + +// firstStringElseSecondString takes two strings, and returns the first +// string if it isn't empty, else the second string +func firstStringElseSecondString(first, second string) string { + if first != "" { + return first + } + return second +} + +// firstSliceElseSecondSlice takes two string slices, and returns the first +// slice of strings if it has contents, else the second slice +func firstSliceElseSecondSlice(first, second []string) []string { + if len(first) > 0 { + return append([]string{}, first...) + } + return append([]string{}, second...) +} + +// firstSlicePairElseSecondSlicePair takes two pairs of string slices, and +// returns the first pair of slices if either has contents, else the second +// pair +func firstSlicePairElseSecondSlicePair(firstA, firstB, secondA, secondB []string) ([]string, []string) { + if len(firstA) > 0 || len(firstB) > 0 { + return append([]string{}, firstA...), append([]string{}, firstB...) + } + return append([]string{}, secondA...), append([]string{}, secondB...) +} + +// mergeEnv combines variables from a and b into a single environment slice. if +// a and b both provide values for the same variable, the value from b is +// preferred +func mergeEnv(a, b []string) []string { + index := make(map[string]int) + results := make([]string, 0, len(a)+len(b)) + for _, kv := range append(append([]string{}, a...), b...) 
{ + k, _, specifiesValue := strings.Cut(kv, "=") + if !specifiesValue { + if value, ok := os.LookupEnv(kv); ok { + kv = kv + "=" + value + } else { + kv = kv + "=" + } + } + if i, seen := index[k]; seen { + results[i] = kv + } else { + index[k] = len(results) + results = append(results, kv) + } + } + return results +} + +// Override takes a buildah docker config and an OCI ImageConfig, and applies a +// mixture of a slice of Dockerfile-style instructions and fields from a config +// blob to them both +func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error { + if len(overrideChanges) > 0 { + if overrideConfig == nil { + overrideConfig = &manifest.Schema2Config{} + } + // Parse the set of changes as we would a Dockerfile. + changes := strings.Join(overrideChanges, "\n") + parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes)) + if err != nil { + return fmt.Errorf("parsing change set %+v: %w", changes, err) + } + // Create a dummy builder object to process configuration-related + // instructions. + subBuilder := imagebuilder.NewBuilder(nil) + // Convert the incoming data into an initial RunConfig. + subBuilder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig) + // Process the change instructions one by one. + for _, node := range parsed.Children { + var step imagebuilder.Step + if err := step.Resolve(node); err != nil { + return fmt.Errorf("resolving change %q: %w", node.Original, err) + } + if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil { + return fmt.Errorf("processing change %q: %w", node.Original, err) + } + } + // Pull settings out of the dummy builder's RunConfig. + overrideConfig = Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig) + } + if overrideConfig != nil { + // Apply changes from a possibly-provided possibly-changed config struct. 
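+ //
+ // The Env handling below relies on mergeEnv's later-wins rule; for
+ // example, mergeEnv([]string{"A=1", "B=2"}, []string{"B=3", "TERM"})
+ // keeps the original order, replaces B's value with 3, and resolves the
+ // bare "TERM" from the process environment (or appends "TERM=" if unset).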
+ dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname) + dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname) + dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User) + oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User) + dconfig.AttachStdin = overrideConfig.AttachStdin + dconfig.AttachStdout = overrideConfig.AttachStdout + dconfig.AttachStderr = overrideConfig.AttachStderr + if len(overrideConfig.ExposedPorts) > 0 { + dexposedPorts := make(map[docker.Port]struct{}) + oexposedPorts := make(map[string]struct{}) + for port := range dconfig.ExposedPorts { + dexposedPorts[port] = struct{}{} + } + for port := range overrideConfig.ExposedPorts { + dexposedPorts[docker.Port(port)] = struct{}{} + } + for port := range oconfig.ExposedPorts { + oexposedPorts[port] = struct{}{} + } + for port := range overrideConfig.ExposedPorts { + oexposedPorts[string(port)] = struct{}{} + } + dconfig.ExposedPorts = dexposedPorts + oconfig.ExposedPorts = oexposedPorts + } + dconfig.Tty = overrideConfig.Tty + dconfig.OpenStdin = overrideConfig.OpenStdin + dconfig.StdinOnce = overrideConfig.StdinOnce + if len(overrideConfig.Env) > 0 { + dconfig.Env = mergeEnv(dconfig.Env, overrideConfig.Env) + oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env) + } + dconfig.Entrypoint, dconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, dconfig.Entrypoint, dconfig.Cmd) + oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd) + if overrideConfig.Healthcheck != nil { + dconfig.Healthcheck = &docker.HealthConfig{ + Test: append([]string{}, overrideConfig.Healthcheck.Test...), + Interval: overrideConfig.Healthcheck.Interval, + Timeout: overrideConfig.Healthcheck.Timeout, + StartPeriod: overrideConfig.Healthcheck.StartPeriod, + Retries: overrideConfig.Healthcheck.Retries, + } + } + dconfig.ArgsEscaped = overrideConfig.ArgsEscaped + dconfig.Image = firstStringElseSecondString(overrideConfig.Image, dconfig.Image) + if len(overrideConfig.Volumes) > 0 { + if dconfig.Volumes == nil { + dconfig.Volumes = make(map[string]struct{}) + } + if oconfig.Volumes == nil { + oconfig.Volumes = make(map[string]struct{}) + } + for volume := range overrideConfig.Volumes { + dconfig.Volumes[volume] = struct{}{} + oconfig.Volumes[volume] = struct{}{} + } + } + dconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, dconfig.WorkingDir) + oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir) + dconfig.NetworkDisabled = overrideConfig.NetworkDisabled + dconfig.MacAddress = overrideConfig.MacAddress + dconfig.OnBuild = overrideConfig.OnBuild + if len(overrideConfig.Labels) > 0 { + if dconfig.Labels == nil { + dconfig.Labels = make(map[string]string) + } + if oconfig.Labels == nil { + oconfig.Labels = make(map[string]string) + } + for k, v := range overrideConfig.Labels { + dconfig.Labels[k] = v + oconfig.Labels[k] = v + } + } + dconfig.StopSignal = overrideConfig.StopSignal + oconfig.StopSignal = overrideConfig.StopSignal + dconfig.StopTimeout = overrideConfig.StopTimeout + dconfig.Shell = firstSliceElseSecondSlice(overrideConfig.Shell, dconfig.Shell) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/internal/mkcw/archive.go b/vendor/github.com/containers/buildah/internal/mkcw/archive.go new file mode 100644 index 
00000000..a0677e42 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/mkcw/archive.go @@ -0,0 +1,464 @@ +package mkcw + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/containers/luksy" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/go-units" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +const minimumImageSize = 10 * 1024 * 1024 + +// ArchiveOptions includes optional settings for generating an archive. +type ArchiveOptions struct { + // If supplied, we'll register the workload with this server. + // Practically necessary if DiskEncryptionPassphrase is not set, in + // which case we'll generate one and throw it away after. + AttestationURL string + + // Used to measure the environment. If left unset (0, ""), defaults will be applied. + CPUs int + Memory int + + // Can be manually set. If left unset ("", false, nil), reasonable values will be used. + TempDir string + TeeType TeeType + IgnoreAttestationErrors bool + ImageSize int64 + WorkloadID string + Slop string + DiskEncryptionPassphrase string + FirmwareLibrary string + Logger *logrus.Logger +} + +type chainRetrievalError struct { + stderr string + err error +} + +func (c chainRetrievalError) Error() string { + if trimmed := strings.TrimSpace(c.stderr); trimmed != "" { + return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v: %v", strings.TrimSpace(c.stderr), c.err) + } + return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v", c.err) +} + +// Archive generates a WorkloadConfig for a specified directory and produces a +// tar archive of a container image's rootfs with the expected contents. +// The input directory will have a ".krun_config.json" file added to it while +// this function is running, but it will be removed on completion. +func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) { + const ( + teeDefaultCPUs = 2 + teeDefaultMemory = 512 + teeDefaultFilesystem = "ext4" + teeDefaultTeeType = SNP + ) + + if path == "" { + return nil, WorkloadConfig{}, fmt.Errorf("required path not specified") + } + logger := options.Logger + if logger == nil { + logger = logrus.StandardLogger() + } + + teeType := options.TeeType + if teeType == "" { + teeType = teeDefaultTeeType + } + cpus := options.CPUs + if cpus == 0 { + cpus = teeDefaultCPUs + } + memory := options.Memory + if memory == 0 { + memory = teeDefaultMemory + } + filesystem := teeDefaultFilesystem + workloadID := options.WorkloadID + if workloadID == "" { + digestInput := path + filesystem + time.Now().String() + workloadID = digest.Canonical.FromString(digestInput).Encoded() + } + workloadConfig := WorkloadConfig{ + Type: teeType, + WorkloadID: workloadID, + CPUs: cpus, + Memory: memory, + AttestationURL: options.AttestationURL, + } + + // Do things which are specific to the type of TEE we're building for. + var chainBytes []byte + var chainBytesFile string + var chainInfo fs.FileInfo + switch teeType { + default: + return nil, WorkloadConfig{}, fmt.Errorf("don't know how to generate TeeData for TEE type %q", teeType) + case SEV, SEV_NO_ES: + // If we need a certificate chain, get it. 
+ chain, err := os.CreateTemp(options.TempDir, "chain") + if err != nil { + return nil, WorkloadConfig{}, err + } + chain.Close() + defer func() { + if err := os.Remove(chain.Name()); err != nil { + logger.Warnf("error removing temporary file %q: %v", chain.Name(), err) + } + }() + logrus.Debugf("sevctl export -f %s", chain.Name()) + cmd := exec.Command("sevctl", "export", "-f", chain.Name()) + var stdout, stderr bytes.Buffer + cmd.Stdout, cmd.Stderr = &stdout, &stderr + if err := cmd.Run(); err != nil { + if !options.IgnoreAttestationErrors { + return nil, WorkloadConfig{}, chainRetrievalError{stderr.String(), err} + } + logger.Warn(chainRetrievalError{stderr.String(), err}.Error()) + } + if chainBytes, err = os.ReadFile(chain.Name()); err != nil { + chainBytes = []byte{} + } + var teeData SevWorkloadData + if len(chainBytes) > 0 { + chainBytesFile = "sev.chain" + chainInfo, err = os.Stat(chain.Name()) + if err != nil { + return nil, WorkloadConfig{}, err + } + teeData.VendorChain = "/" + chainBytesFile + } + encodedTeeData, err := json.Marshal(teeData) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err) + } + workloadConfig.TeeData = string(encodedTeeData) + case SNP: + teeData := SnpWorkloadData{ + Generation: "milan", + } + encodedTeeData, err := json.Marshal(teeData) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err) + } + workloadConfig.TeeData = string(encodedTeeData) + } + + // Write part of the config blob where the krun init process will be + // looking for it. The oci2cw tool used `buildah inspect` output, but + // init is just looking for fields that have the right names in any + // object, and the image's config will have that, so let's try encoding + // it directly. + krunConfigPath := filepath.Join(path, ".krun_config.json") + krunConfigBytes, err := json.Marshal(ociConfig) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err) + } + if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err) + } + defer func() { + if err := os.Remove(krunConfigPath); err != nil { + logger.Warnf("removing krun configuration file: %v", err) + } + }() + + // Encode the workload config, in case it fails for any reason. + cleanedUpWorkloadConfig := workloadConfig + switch cleanedUpWorkloadConfig.Type { + default: + return nil, WorkloadConfig{}, fmt.Errorf("don't know how to canonicalize TEE type %q", cleanedUpWorkloadConfig.Type) + case SEV, SEV_NO_ES: + cleanedUpWorkloadConfig.Type = SEV + case SNP: + cleanedUpWorkloadConfig.Type = SNP + } + workloadConfigBytes, err := json.Marshal(cleanedUpWorkloadConfig) + if err != nil { + return nil, WorkloadConfig{}, err + } + + // Make sure we have the passphrase to use for encrypting the disk image. + diskEncryptionPassphrase := options.DiskEncryptionPassphrase + if diskEncryptionPassphrase == "" { + diskEncryptionPassphrase, err = GenerateDiskEncryptionPassphrase() + if err != nil { + return nil, WorkloadConfig{}, err + } + } + + // If we weren't told how big the image should be, get a rough estimate + // of the input data size, then add a hedge to it. 
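+ //
+ // For example, with a 1 GiB rootfs and Slop set to "25%+100MB", slop
+ // (defined at the end of this file) applies the factors left to right:
+ // 1 GiB * 125/100 + 100 MiB = 1447034880 bytes. Sizes go through
+ // units.RAMInBytes, so "100MB" means binary megabytes here.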
+ imageSize := slop(options.ImageSize, options.Slop) + if imageSize == 0 { + var sourceSize int64 + if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { + if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { + return err + } + info, err := d.Info() + if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { + return err + } + sourceSize += info.Size() + return nil + }); err != nil { + return nil, WorkloadConfig{}, err + } + imageSize = slop(sourceSize, options.Slop) + } + if imageSize%4096 != 0 { + imageSize += (4096 - (imageSize % 4096)) + } + if imageSize < minimumImageSize { + imageSize = minimumImageSize + } + + // Create a file to use as the unencrypted version of the disk image. + plain, err := os.CreateTemp(options.TempDir, "plain.img") + if err != nil { + return nil, WorkloadConfig{}, err + } + removePlain := true + defer func() { + if removePlain { + if err := os.Remove(plain.Name()); err != nil { + logger.Warnf("removing temporary file %q: %v", plain.Name(), err) + } + } + }() + + // Lengthen the plaintext disk image file. + if err := plain.Truncate(imageSize); err != nil { + plain.Close() + return nil, WorkloadConfig{}, err + } + plainInfo, err := plain.Stat() + plain.Close() + if err != nil { + return nil, WorkloadConfig{}, err + } + + // Format the disk image with the filesystem contents. + if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil { + if strings.TrimSpace(stderr) != "" { + return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err) + } + return nil, WorkloadConfig{}, err + } + + // If we're registering the workload, we can do that now. + if workloadConfig.AttestationURL != "" { + if err := SendRegistrationRequest(workloadConfig, diskEncryptionPassphrase, options.FirmwareLibrary, options.IgnoreAttestationErrors, logger); err != nil { + return nil, WorkloadConfig{}, err + } + } + + // Try to encrypt on the fly. 
+ pipeReader, pipeWriter := io.Pipe() + removePlain = false + go func() { + var err error + defer func() { + if err := os.Remove(plain.Name()); err != nil { + logger.Warnf("removing temporary file %q: %v", plain.Name(), err) + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + }() + plain, err := os.Open(plain.Name()) + if err != nil { + logrus.Errorf("opening unencrypted disk image %q: %v", plain.Name(), err) + return + } + defer plain.Close() + tw := tar.NewWriter(pipeWriter) + defer tw.Flush() + + // Write /entrypoint + var decompressedEntrypoint bytes.Buffer + decompressor, err := gzip.NewReader(bytes.NewReader(entrypointCompressedBytes)) + if err != nil { + logrus.Errorf("decompressing copy of entrypoint: %v", err) + return + } + defer decompressor.Close() + if _, err = io.Copy(&decompressedEntrypoint, decompressor); err != nil { + logrus.Errorf("decompressing copy of entrypoint: %v", err) + return + } + entrypointHeader, err := tar.FileInfoHeader(plainInfo, "") + if err != nil { + logrus.Errorf("building header for entrypoint: %v", err) + return + } + entrypointHeader.Name = "entrypoint" + entrypointHeader.Mode = 0o755 + entrypointHeader.Uname, entrypointHeader.Gname = "", "" + entrypointHeader.Uid, entrypointHeader.Gid = 0, 0 + entrypointHeader.Size = int64(decompressedEntrypoint.Len()) + if err = tw.WriteHeader(entrypointHeader); err != nil { + logrus.Errorf("writing header for %q: %v", entrypointHeader.Name, err) + return + } + if _, err = io.Copy(tw, &decompressedEntrypoint); err != nil { + logrus.Errorf("writing %q: %v", entrypointHeader.Name, err) + return + } + + // Write /sev.chain + if chainInfo != nil { + chainHeader, err := tar.FileInfoHeader(chainInfo, "") + if err != nil { + logrus.Errorf("building header for %q: %v", chainInfo.Name(), err) + return + } + chainHeader.Name = chainBytesFile + chainHeader.Mode = 0o600 + chainHeader.Uname, chainHeader.Gname = "", "" + chainHeader.Uid, chainHeader.Gid = 0, 0 + chainHeader.Size = int64(len(chainBytes)) + if err = tw.WriteHeader(chainHeader); err != nil { + logrus.Errorf("writing header for %q: %v", chainHeader.Name, err) + return + } + if _, err = tw.Write(chainBytes); err != nil { + logrus.Errorf("writing %q: %v", chainHeader.Name, err) + return + } + } + + // Write /krun-sev.json. + workloadConfigHeader, err := tar.FileInfoHeader(plainInfo, "") + if err != nil { + logrus.Errorf("building header for %q: %v", plainInfo.Name(), err) + return + } + workloadConfigHeader.Name = "krun-sev.json" + workloadConfigHeader.Mode = 0o600 + workloadConfigHeader.Uname, workloadConfigHeader.Gname = "", "" + workloadConfigHeader.Uid, workloadConfigHeader.Gid = 0, 0 + workloadConfigHeader.Size = int64(len(workloadConfigBytes)) + if err = tw.WriteHeader(workloadConfigHeader); err != nil { + logrus.Errorf("writing header for %q: %v", workloadConfigHeader.Name, err) + return + } + if _, err = tw.Write(workloadConfigBytes); err != nil { + logrus.Errorf("writing %q: %v", workloadConfigHeader.Name, err) + return + } + + // Write /tmp. 
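+ //
+ // At this point the archive already holds /entrypoint, an optional
+ // /sev.chain, and /krun-sev.json; after the /tmp/ entry below, the only
+ // remaining member is /disk.img, which carries the LUKS header, the
+ // encrypted payload, alignment padding, and the "KRUN" footer.
+ //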
+ tmpHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+ return
+ }
+ tmpHeader.Name = "tmp/"
+ tmpHeader.Typeflag = tar.TypeDir
+ tmpHeader.Mode = 0o1777
+ tmpHeader.Uname, tmpHeader.Gname = "", ""
+ tmpHeader.Uid, tmpHeader.Gid = 0, 0
+ tmpHeader.Size = 0
+ if err = tw.WriteHeader(tmpHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err)
+ return
+ }
+
+ // Now figure out the footer that we'll append to the encrypted disk.
+ var footer bytes.Buffer
+ lengthBuffer := make([]byte, 8)
+ footer.Write(workloadConfigBytes)
+ footer.WriteString("KRUN")
+ binary.LittleEndian.PutUint64(lengthBuffer, uint64(len(workloadConfigBytes)))
+ footer.Write(lengthBuffer)
+
+ // Start encrypting and write /disk.img.
+ header, encrypt, blockSize, err := luksy.EncryptV1([]string{diskEncryptionPassphrase}, "")
+ paddingBoundary := int64(4096)
+ paddingNeeded := (paddingBoundary - ((int64(len(header)) + imageSize + int64(footer.Len())) % paddingBoundary)) % paddingBoundary
+ diskHeader := workloadConfigHeader
+ diskHeader.Name = "disk.img"
+ diskHeader.Mode = 0o600
+ diskHeader.Size = int64(len(header)) + imageSize + paddingNeeded + int64(footer.Len())
+ if err = tw.WriteHeader(diskHeader); err != nil {
+ logrus.Errorf("writing archive header for disk.img: %v", err)
+ return
+ }
+ if _, err = io.Copy(tw, bytes.NewReader(header)); err != nil {
+ logrus.Errorf("writing encryption header for disk.img: %v", err)
+ return
+ }
+ encryptWrapper := luksy.EncryptWriter(encrypt, tw, blockSize)
+ if _, err = io.Copy(encryptWrapper, plain); err != nil {
+ logrus.Errorf("encrypting disk.img: %v", err)
+ return
+ }
+ encryptWrapper.Close()
+ if _, err = tw.Write(make([]byte, paddingNeeded)); err != nil {
+ logrus.Errorf("writing padding for disk.img: %v", err)
+ return
+ }
+ if _, err = io.Copy(tw, &footer); err != nil {
+ logrus.Errorf("writing footer for disk.img: %v", err)
+ return
+ }
+ tw.Close()
+ }()
+
+ return pipeReader, workloadConfig, nil
+}
+
+func slop(size int64, slop string) int64 {
+ if slop == "" {
+ return size * 5 / 4
+ }
+ for _, factor := range strings.Split(slop, "+") {
+ factor = strings.TrimSpace(factor)
+ if factor == "" {
+ continue
+ }
+ if strings.HasSuffix(factor, "%") {
+ percentage := strings.TrimSuffix(factor, "%")
+ percent, err := strconv.ParseInt(percentage, 10, 8)
+ if err != nil {
+ logrus.Warnf("parsing percentage %q: %v", factor, err)
+ } else {
+ size *= (percent + 100)
+ size /= 100
+ }
+ } else {
+ more, err := units.RAMInBytes(factor)
+ if err != nil {
+ logrus.Warnf("parsing %q as a size: %v", factor, err)
+ } else {
+ size += more
+ }
+ }
+ }
+ return size
+}
diff --git a/vendor/github.com/containers/buildah/internal/mkcw/attest.go b/vendor/github.com/containers/buildah/internal/mkcw/attest.go
new file mode 100644
index 00000000..91362d37
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/mkcw/attest.go
@@ -0,0 +1,250 @@
+package mkcw
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/internal/mkcw/types"
+ "github.com/sirupsen/logrus"
+)
+
+type (
+ RegistrationRequest = types.RegistrationRequest
+ TeeConfig = types.TeeConfig
+ TeeConfigFlags = types.TeeConfigFlags
+ TeeConfigMinFW = types.TeeConfigMinFW
+)
+
+type measurementError struct {
+ err error
+}
+
+func (m measurementError) Error()
string { + return fmt.Sprintf("generating measurement for attestation: %v", m.err) +} + +type attestationError struct { + err error +} + +func (a attestationError) Error() string { + return fmt.Sprintf("registering workload: %v", a.err) +} + +type httpError struct { + statusCode int +} + +func (h httpError) Error() string { + if statusText := http.StatusText(h.statusCode); statusText != "" { + return fmt.Sprintf("received server status %d (%q)", h.statusCode, statusText) + } + return fmt.Sprintf("received server status %d", h.statusCode) +} + +// SendRegistrationRequest registers a workload with the specified decryption +// passphrase with the service whose location is part of the WorkloadConfig. +func SendRegistrationRequest(workloadConfig WorkloadConfig, diskEncryptionPassphrase, firmwareLibrary string, ignoreAttestationErrors bool, logger *logrus.Logger) error { + if workloadConfig.AttestationURL == "" { + return errors.New("attestation URL not provided") + } + + // Measure the execution environment. + measurement, err := GenerateMeasurement(workloadConfig, firmwareLibrary) + if err != nil { + if !ignoreAttestationErrors { + return &measurementError{err} + } + logger.Warnf("generating measurement for attestation: %v", err) + } + + // Build the workload registration (attestation) request body. + var teeConfigBytes []byte + switch workloadConfig.Type { + case SEV, SEV_NO_ES, SNP: + var cbits types.TeeConfigFlagBits + switch workloadConfig.Type { + case SEV: + cbits = types.SEV_CONFIG_NO_DEBUG | + types.SEV_CONFIG_NO_KEY_SHARING | + types.SEV_CONFIG_ENCRYPTED_STATE | + types.SEV_CONFIG_NO_SEND | + types.SEV_CONFIG_DOMAIN | + types.SEV_CONFIG_SEV + case SEV_NO_ES: + cbits = types.SEV_CONFIG_NO_DEBUG | + types.SEV_CONFIG_NO_KEY_SHARING | + types.SEV_CONFIG_NO_SEND | + types.SEV_CONFIG_DOMAIN | + types.SEV_CONFIG_SEV + case SNP: + cbits = types.SNP_CONFIG_SMT | + types.SNP_CONFIG_MANDATORY | + types.SNP_CONFIG_MIGRATE_MA | + types.SNP_CONFIG_DEBUG + default: + panic("internal error") // shouldn't happen + } + teeConfig := TeeConfig{ + Flags: TeeConfigFlags{ + Bits: cbits, + }, + MinFW: TeeConfigMinFW{ + Major: 0, + Minor: 0, + }, + } + teeConfigBytes, err = json.Marshal(teeConfig) + if err != nil { + return err + } + default: + return fmt.Errorf("don't know how to generate tee_config for %q TEEs", workloadConfig.Type) + } + + registrationRequest := RegistrationRequest{ + WorkloadID: workloadConfig.WorkloadID, + LaunchMeasurement: measurement, + TeeConfig: string(teeConfigBytes), + Passphrase: diskEncryptionPassphrase, + } + registrationRequestBytes, err := json.Marshal(registrationRequest) + if err != nil { + return err + } + + // Register the workload. + parsedURL, err := url.Parse(workloadConfig.AttestationURL) + if err != nil { + return err + } + parsedURL.Path = path.Join(parsedURL.Path, "/kbs/v0/register_workload") + if err != nil { + return err + } + url := parsedURL.String() + requestContentType := "application/json" + requestBody := bytes.NewReader(registrationRequestBytes) + defer http.DefaultClient.CloseIdleConnections() + resp, err := http.Post(url, requestContentType, requestBody) + if resp != nil { + if resp.Body != nil { + resp.Body.Close() + } + switch resp.StatusCode { + default: + if !ignoreAttestationErrors { + return &attestationError{&httpError{resp.StatusCode}} + } + logger.Warn(attestationError{&httpError{resp.StatusCode}}.Error()) + case http.StatusOK, http.StatusAccepted: + // great! 
+ } + } + if err != nil { + if !ignoreAttestationErrors { + return &attestationError{err} + } + logger.Warn(attestationError{err}.Error()) + } + return nil +} + +// GenerateMeasurement generates the runtime measurement using the CPU count, +// memory size, and the firmware shared library, whatever it's called, wherever +// it is. +// If firmwareLibrary is a path, it will be the only one checked. +// If firmwareLibrary is a filename, it will be checked for in a hard-coded set +// of directories. +// If firmwareLibrary is empty, both the filename and the directory it is in +// will be taken from a hard-coded set of candidates. +func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) (string, error) { + cpuString := fmt.Sprintf("%d", workloadConfig.CPUs) + memoryString := fmt.Sprintf("%d", workloadConfig.Memory) + var prefix string + switch workloadConfig.Type { + case SEV: + prefix = "SEV-ES" + case SEV_NO_ES: + prefix = "SEV" + case SNP: + prefix = "SNP" + default: + return "", fmt.Errorf("don't know which measurement to use for TEE type %q", workloadConfig.Type) + } + + sharedLibraryDirs := []string{ + "/usr/local/lib64", + "/usr/local/lib", + "/lib64", + "/lib", + "/usr/lib64", + "/usr/lib", + } + if llp, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok { + sharedLibraryDirs = append(sharedLibraryDirs, strings.Split(llp, ":")...) + } + libkrunfwNames := []string{ + "libkrunfw-sev.so.4", + "libkrunfw-sev.so.3", + "libkrunfw-sev.so", + } + var pathsToCheck []string + if firmwareLibrary == "" { + for _, sharedLibraryDir := range sharedLibraryDirs { + if sharedLibraryDir == "" { + continue + } + for _, libkrunfw := range libkrunfwNames { + candidate := filepath.Join(sharedLibraryDir, libkrunfw) + pathsToCheck = append(pathsToCheck, candidate) + } + } + } else { + if filepath.IsAbs(firmwareLibrary) { + pathsToCheck = append(pathsToCheck, firmwareLibrary) + } else { + for _, sharedLibraryDir := range sharedLibraryDirs { + if sharedLibraryDir == "" { + continue + } + candidate := filepath.Join(sharedLibraryDir, firmwareLibrary) + pathsToCheck = append(pathsToCheck, candidate) + } + } + } + for _, candidate := range pathsToCheck { + if _, err := os.Lstat(candidate); err == nil { + var stdout, stderr bytes.Buffer + logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate) + cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + if stderr.Len() > 0 { + err = fmt.Errorf("krunfw_measurement: %s: %w", strings.TrimSpace(stderr.String()), err) + } + return "", err + } + scanner := bufio.NewScanner(&stdout) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, prefix+":") { + return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil + } + } + return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":") + } + } + return "", fmt.Errorf("generating measurement: none of %v found: %w", pathsToCheck, os.ErrNotExist) +} diff --git a/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz b/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz new file mode 100644 index 0000000000000000000000000000000000000000..8576c155f14195564c980fdde6407b7c1c7a6432 GIT binary patch literal 405 zcmV;G0c!pqiwFonIL&1M17&V>a(QrXX>N1??V7(%!!QuWFQF|J4D<<*2S#KeCI*DE z0F{W45byvlF=^CdSBWdi#4GfXDhBc-ya$f|gf<{im4W4clJn)e+{KQ!==^#fUxYyb 
zo)FH!xL#y@wEpZ!J>-!?F$y maxWorkloadConfigSize { + return wc, fmt.Errorf("workload config in %q is %d bytes long, which seems unreasonable (max allowed %d)", path, length, maxWorkloadConfigSize) + } + + // Read and decode the config. + configBytes := make([]byte, length) + if _, err = f.Seek(-(int64(length) + 12), io.SeekEnd); err != nil { + return wc, fmt.Errorf("looking for workload config from disk image: %w", err) + } + if n, err := f.Read(configBytes); err != nil || n != len(configBytes) { + if err != nil { + return wc, fmt.Errorf("reading workload config from disk image: %w", err) + } + return wc, fmt.Errorf("short read (expected %d bytes near the end of %q, got %d)", len(configBytes), path, n) + } + err = json.Unmarshal(configBytes, &wc) + if err != nil { + err = fmt.Errorf("unmarshaling configuration %q: %w", string(configBytes), err) + } + return wc, err +} + +// WriteWorkloadConfigToImage writes the workload configuration to the +// specified disk image file, overwriting a previous configuration if it's +// asked to and it finds one +func WriteWorkloadConfigToImage(imageFile *os.File, workloadConfigBytes []byte, overwrite bool) error { + // Read those last 12 bytes to check if there's a configuration there already, which we should overwrite. + var overwriteOffset int64 + if overwrite { + finalTwelve := make([]byte, 12) + if _, err := imageFile.Seek(-12, io.SeekEnd); err != nil { + return fmt.Errorf("checking for workload config signature: %w", err) + } + if n, err := imageFile.Read(finalTwelve); err != nil || n != len(finalTwelve) { + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err) + } + if n != len(finalTwelve) { + return fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", imageFile.Name(), n) + } + } + if magic := string(finalTwelve[0:4]); magic == "KRUN" { + length := binary.LittleEndian.Uint64(finalTwelve[4:]) + if length < maxWorkloadConfigSize { + overwriteOffset = int64(length + 12) + } + } + } + // If we found a configuration in the file, try to figure out how much padding was used. + paddingSize := int64(preferredPaddingBoundary) + if overwriteOffset != 0 { + st, err := imageFile.Stat() + if err != nil { + return err + } + for _, possiblePaddingLength := range []int64{0x100000, 0x10000, 0x1000, 0x200, 0x100} { + if overwriteOffset > possiblePaddingLength { + continue + } + if st.Size()%possiblePaddingLength != 0 { + continue + } + if _, err := imageFile.Seek(-possiblePaddingLength, io.SeekEnd); err != nil { + return fmt.Errorf("checking size of padding at end of file: %w", err) + } + buf := make([]byte, possiblePaddingLength) + n, err := imageFile.Read(buf) + if err != nil { + return fmt.Errorf("reading possible padding at end of file: %w", err) + } + if n != len(buf) { + return fmt.Errorf("short read checking size of padding at end of file: %d != %d", n, len(buf)) + } + if bytes.Equal(buf[:possiblePaddingLength-overwriteOffset], make([]byte, possiblePaddingLength-overwriteOffset)) { + // everything up to the configuration was zero bytes, so it was padding + overwriteOffset = possiblePaddingLength + paddingSize = possiblePaddingLength + break + } + } + } + + // Append the krun configuration to a new buffer. 
+ var formatted bytes.Buffer + nWritten, err := formatted.Write(workloadConfigBytes) + if err != nil { + return fmt.Errorf("building workload config: %w", err) + } + if nWritten != len(workloadConfigBytes) { + return fmt.Errorf("short write appending configuration to buffer: %d != %d", nWritten, len(workloadConfigBytes)) + } + // Append the magic string to the buffer. + nWritten, err = formatted.WriteString(krunMagic) + if err != nil { + return fmt.Errorf("building workload config signature: %w", err) + } + if nWritten != len(krunMagic) { + return fmt.Errorf("short write appending krun magic to buffer: %d != %d", nWritten, len(krunMagic)) + } + // Append the 64-bit little-endian length of the workload configuration to the buffer. + workloadConfigLengthBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(workloadConfigLengthBytes, uint64(len(workloadConfigBytes))) + nWritten, err = formatted.Write(workloadConfigLengthBytes) + if err != nil { + return fmt.Errorf("building workload config signature size: %w", err) + } + if nWritten != len(workloadConfigLengthBytes) { + return fmt.Errorf("short write appending configuration length to buffer: %d != %d", nWritten, len(workloadConfigLengthBytes)) + } + + // Build a copy of that data, with padding preceding it. + var padded bytes.Buffer + if int64(formatted.Len())%paddingSize != 0 { + extra := paddingSize - (int64(formatted.Len()) % paddingSize) + nWritten, err := padded.Write(make([]byte, extra)) + if err != nil { + return fmt.Errorf("buffering padding: %w", err) + } + if int64(nWritten) != extra { + return fmt.Errorf("short write buffering padding for disk image: %d != %d", nWritten, extra) + } + } + extra := int64(formatted.Len()) + nWritten, err = padded.Write(formatted.Bytes()) + if err != nil { + return fmt.Errorf("buffering workload config: %w", err) + } + if int64(nWritten) != extra { + return fmt.Errorf("short write buffering workload config: %d != %d", nWritten, extra) + } + + // Write the buffer to the file, starting with padding. 
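+ //
+ // After the write below, the tail of the image reads, in order: zero
+ // padding up to a paddingSize boundary, the workload config JSON, the
+ // 4-byte "KRUN" magic, and the config's length as a little-endian
+ // uint64, which is exactly the trailer that the reading code above
+ // scans backwards from the end of the file.
+ //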
+ if _, err = imageFile.Seek(-overwriteOffset, io.SeekEnd); err != nil { + return fmt.Errorf("preparing to write workload config: %w", err) + } + nWritten, err = imageFile.Write(padded.Bytes()) + if err != nil { + return fmt.Errorf("writing workload config: %w", err) + } + if nWritten != padded.Len() { + return fmt.Errorf("short write writing configuration to disk image: %d != %d", nWritten, padded.Len()) + } + offset, err := imageFile.Seek(0, io.SeekCurrent) + if err != nil { + return fmt.Errorf("preparing mark end of disk image: %w", err) + } + if err = imageFile.Truncate(offset); err != nil { + return fmt.Errorf("marking end of disk image: %w", err) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go index 5050be38..89ff7d39 100644 --- a/vendor/github.com/containers/buildah/internal/parse/parse.go +++ b/vendor/github.com/containers/buildah/internal/parse/parse.go @@ -1,432 +1,15 @@ package parse import ( - "context" "fmt" "os" - "path" "path/filepath" - "strconv" "strings" - "errors" - - "github.com/containers/buildah/define" - "github.com/containers/buildah/internal" - internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/common/pkg/parse" - "github.com/containers/image/v5/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/lockfile" - "github.com/containers/storage/pkg/unshare" specs "github.com/opencontainers/runtime-spec/specs-go" - selinux "github.com/opencontainers/selinux/go-selinux" -) - -const ( - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs = "tmpfs" - // TypeCache is the type for mounting a common persistent cache from host - TypeCache = "cache" - // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. - // Lifecycle of following directory will be inherited from how host machine treats temporary directory - BuildahCacheDir = "buildah-cache" - // mount=type=cache allows users to lock a cache store while its being used by another build - BuildahCacheLockfile = "buildah-cache-lockfile" - // All the lockfiles are stored in a separate directory inside `BuildahCacheDir` - // Example `/var/tmp/buildah-cache//buildah-cache-lockfile` - BuildahCacheLockfileDir = "buildah-cache-lockfiles" -) - -var ( - errBadMntOption = errors.New("invalid mount option") - errBadOptionArg = errors.New("must provide an argument for option") - errBadVolDest = errors.New("must set volume destination") - errBadVolSrc = errors.New("must set volume source") - errDuplicateDest = errors.New("duplicate mount destination") ) -// GetBindMount parses a single bind mount entry from the --mount flag. -// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. 
-// Caller is expected to perform unmount of any mounted images -func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) { - newMount := specs.Mount{ - Type: define.TypeBind, - } - - mountReadability := false - setDest := false - bindNonRecursive := false - fromImage := "" - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "type": - // This is already processed - continue - case "bind-nonrecursive": - newMount.Options = append(newMount.Options, "bind") - bindNonRecursive = true - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - mountReadability = true - case "rw", "readwrite": - newMount.Options = append(newMount.Options, "rw") - mountReadability = true - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - mountReadability = true - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": - newMount.Options = append(newMount.Options, kv[0]) - case "from": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - fromImage = kv[1] - case "bind-propagation": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, kv[1]) - case "src", "source": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Source = kv[1] - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - targetPath := kv[1] - if !path.IsAbs(targetPath) { - targetPath = filepath.Join(workDir, targetPath) - } - if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { - return newMount, "", err - } - newMount.Destination = targetPath - setDest = true - case "consistency": - // Option for OS X only, has no meaning on other platforms - // and can thus be safely ignored. 
- // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts - default: - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) - } - } - - // default mount readability is always readonly - if !mountReadability { - newMount.Options = append(newMount.Options, "ro") - } - - // Following variable ensures that we return imagename only if we did additional mount - isImageMounted := false - if fromImage != "" { - mountPoint := "" - if additionalMountPoints != nil { - if val, ok := additionalMountPoints[fromImage]; ok { - mountPoint = val.MountPoint - } - } - // if mountPoint of image was not found in additionalMap - // or additionalMap was nil, try mounting image - if mountPoint == "" { - image, err := internalUtil.LookupImage(ctx, store, fromImage) - if err != nil { - return newMount, "", err - } - - mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) - if err != nil { - return newMount, "", err - } - isImageMounted = true - } - contextDir = mountPoint - } - - // buildkit parity: default bind option must be `rbind` - // unless specified - if !bindNonRecursive { - newMount.Options = append(newMount.Options, "rbind") - } - - if !setDest { - return newMount, fromImage, errBadVolDest - } - - // buildkit parity: support absolute path for sources from current build context - if contextDir != "" { - // path should be /contextDir/specified path - newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) - } else { - // looks like its coming from `build run --mount=type=bind` allow using absolute path - // error out if no source is set - if newMount.Source == "" { - return newMount, "", errBadVolSrc - } - if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { - return newMount, "", err - } - } - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, fromImage, err - } - newMount.Options = opts - - if !isImageMounted { - // we don't want any cleanups if image was not mounted explicitly - // so dont return anything - fromImage = "" - } - - return newMount, fromImage, nil -} - -// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it. -func CleanCacheMount() error { - cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) - return os.RemoveAll(cacheParent) -} - -// GetCacheMount parses a single cache mount entry from the --mount flag. -// -// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??). 
-func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) { - var err error - var mode uint64 - var buildahLockFilesDir string - var ( - setDest bool - setShared bool - setReadOnly bool - foundSElinuxLabel bool - ) - fromStage := "" - newMount := specs.Mount{ - Type: define.TypeBind, - } - // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id - id := "" - //buidkit parity: cache directory defaults to 755 - mode = 0o755 - //buidkit parity: cache directory defaults to uid 0 if not specified - uid := 0 - //buidkit parity: cache directory defaults to gid 0 if not specified - gid := 0 - // sharing mode - sharing := "shared" - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "type": - // This is already processed - continue - case "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - case "rw", "readwrite": - newMount.Options = append(newMount.Options, "rw") - case "readonly", "ro": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - setReadOnly = true - case "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) - foundSElinuxLabel = true - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U": - newMount.Options = append(newMount.Options, kv[0]) - setShared = true - case "sharing": - sharing = kv[1] - case "bind-propagation": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, kv[1]) - case "id": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - id = kv[1] - case "from": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - fromStage = kv[1] - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - targetPath := kv[1] - if !path.IsAbs(targetPath) { - targetPath = filepath.Join(workDir, targetPath) - } - if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { - return newMount, nil, err - } - newMount.Destination = targetPath - setDest = true - case "src", "source": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Source = kv[1] - case "mode": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - mode, err = strconv.ParseUint(kv[1], 8, 32) - if err != nil { - return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err) - } - case "uid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - uid, err = strconv.Atoi(kv[1]) - if err != nil { - return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err) - } - case "gid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - gid, err = strconv.Atoi(kv[1]) - if err != nil { - return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err) - } - default: - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption) - } - } - - // If selinux is enabled and no selinux option was configured - // default to `z` i.e shared content label. 
- if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" { - newMount.Options = append(newMount.Options, "z") - } - - if !setDest { - return newMount, nil, errBadVolDest - } - - if fromStage != "" { - // do not create cache on host - // instead use read-only mounted stage as cache - mountPoint := "" - if additionalMountPoints != nil { - if val, ok := additionalMountPoints[fromStage]; ok { - if val.IsStage { - mountPoint = val.MountPoint - } - } - } - // Cache does not supports using image so if not stage found - // return with error - if mountPoint == "" { - return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage) - } - // path should be /contextDir/specified path - newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source)) - } else { - // we need to create cache on host if no image is being used - - // since type is cache and cache can be reused by consecutive builds - // create a common cache directory, which persists on hosts within temp lifecycle - // add subdirectory if specified - - // cache parent directory: creates separate cache parent for each user. - cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) - // create cache on host if not present - err = os.MkdirAll(cacheParent, os.FileMode(0755)) - if err != nil { - return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err) - } - - if id != "" { - newMount.Source = filepath.Join(cacheParent, filepath.Clean(id)) - buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id)) - } else { - newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination)) - buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination)) - } - idPair := idtools.IDPair{ - UID: uid, - GID: gid, - } - //buildkit parity: change uid and gid if specified otheriwise keep `0` - err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) - if err != nil { - return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err) - } - - // create a subdirectory inside `cacheParent` just to store lockfiles - buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir) - err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700)) - if err != nil { - return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err) - } - } - - var targetLock *lockfile.LockFile // = nil - succeeded := false - defer func() { - if !succeeded && targetLock != nil { - targetLock.Unlock() - } - }() - switch sharing { - case "locked": - // lock parent cache - lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile)) - if err != nil { - return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err) - } - // Will be unlocked after the RUN step is executed. 
- lockfile.Lock() - targetLock = lockfile - case "shared": - // do nothing since default is `shared` - break - default: - // error out for unknown values - return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err) - } - - // buildkit parity: default sharing should be shared - // unless specified - if !setShared { - newMount.Options = append(newMount.Options, "shared") - } - - // buildkit parity: cache must writable unless `ro` or `readonly` is configured explicitly - if !setReadOnly { - newMount.Options = append(newMount.Options, "rw") - } - - newMount.Options = append(newMount.Options, "bind") - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, nil, err - } - newMount.Options = opts - - succeeded = true - return newMount, targetLock, nil -} - // ValidateVolumeMountHostDir validates the host path of buildah --volume func ValidateVolumeMountHostDir(hostDir string) error { if !filepath.IsAbs(hostDir) { @@ -467,22 +50,6 @@ func SplitStringWithColonEscape(str string) []string { return result } -func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { - finalVolumeMounts := make(map[string]specs.Mount) - - for _, volume := range volumes { - volumeMount, err := Volume(volume) - if err != nil { - return nil, err - } - if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { - return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest) - } - finalVolumeMounts[volumeMount.Destination] = volumeMount - } - return finalVolumeMounts, nil -} - // Volume parses the input of --volume func Volume(volume string) (specs.Mount, error) { mount := specs.Mount{} @@ -510,178 +77,3 @@ func Volume(volume string) (specs.Mount, error) { mount.Options = mountOpts return mount, nil } - -// UnlockLockArray is a helper for cleaning up after GetVolumes and the like. -func UnlockLockArray(locks []*lockfile.LockFile) { - for _, lock := range locks { - lock.Unlock() - } -} - -// GetVolumes gets the volumes from --volume and --mount -// -// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). -func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) { - unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir) - if err != nil { - return nil, mountedImages, nil, err - } - succeeded := false - defer func() { - if !succeeded { - UnlockLockArray(targetLocks) - } - }() - volumeMounts, err := getVolumeMounts(volumes) - if err != nil { - return nil, mountedImages, nil, err - } - for dest, mount := range volumeMounts { - if _, ok := unifiedMounts[dest]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest) - } - unifiedMounts[dest] = mount - } - - finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) - for _, mount := range unifiedMounts { - finalMounts = append(finalMounts, mount) - } - succeeded = true - return finalMounts, mountedImages, targetLocks, nil -} - -// getMounts takes user-provided input from the --mount flag and creates OCI -// spec mounts. -// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... -// buildah run --mount type=tmpfs,target=/dev/shm ... -// -// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). 
-func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) { - // If `type` is not set default to "bind" - mountType := define.TypeBind - finalMounts := make(map[string]specs.Mount) - mountedImages := make([]string, 0) - targetLocks := make([]*lockfile.LockFile, 0) - succeeded := false - defer func() { - if !succeeded { - UnlockLockArray(targetLocks) - } - }() - - errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=,[src=,]target=[,options]") - - // TODO(vrothberg): the manual parsing can be replaced with a regular expression - // to allow a more robust parsing of the mount format and to give - // precise errors regarding supported format versus supported options. - for _, mount := range mounts { - tokens := strings.Split(mount, ",") - if len(tokens) < 2 { - return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax) - } - for _, field := range tokens { - if strings.HasPrefix(field, "type=") { - kv := strings.Split(field, "=") - if len(kv) != 2 { - return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax) - } - mountType = kv[1] - } - } - switch mountType { - case define.TypeBind: - mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir) - if err != nil { - return nil, mountedImages, nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) - } - finalMounts[mount.Destination] = mount - mountedImages = append(mountedImages, image) - case TypeCache: - mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir) - if err != nil { - return nil, mountedImages, nil, err - } - if tl != nil { - targetLocks = append(targetLocks, tl) - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) - } - finalMounts[mount.Destination] = mount - case TypeTmpfs: - mount, err := GetTmpfsMount(tokens) - if err != nil { - return nil, mountedImages, nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) - } - finalMounts[mount.Destination] = mount - default: - return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType) - } - } - - succeeded = true - return finalMounts, mountedImages, targetLocks, nil -} - -// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag -func GetTmpfsMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeTmpfs, - Source: TypeTmpfs, - } - - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "type": - // This is already processed - continue - case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "tmpcopyup": - //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. 
- newMount.Options = append(newMount.Options, kv[0]) - case "tmpfs-mode": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) - case "tmpfs-size": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) - case "src", "source": - return newMount, errors.New("source is not supported with tmpfs mounts") - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - default: - return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) - } - } - - if !setDest { - return newMount, errBadVolDest - } - - return newMount, nil -} diff --git a/vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go new file mode 100644 index 00000000..ff966b20 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go @@ -0,0 +1,26 @@ +package tmpdir + +import ( + "os" + "path/filepath" + + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +// GetTempDir returns the path of the preferred temporary directory on the host. +func GetTempDir() string { + if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { + abs, err := filepath.Abs(tmpdir) + if err == nil { + return abs + } + logrus.Warnf("ignoring TMPDIR from environment, evaluating it: %v", err) + } + if containerConfig, err := config.Default(); err == nil { + if tmpdir, err := containerConfig.ImageCopyTmpDir(); err == nil { + return tmpdir + } + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go index c945ca85..01f4b105 100644 --- a/vendor/github.com/containers/buildah/internal/util/util.go +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -8,9 +8,8 @@ import ( "github.com/containers/buildah/define" "github.com/containers/common/libimage" + lplatform "github.com/containers/common/libimage/platform" "github.com/containers/image/v5/types" - encconfig "github.com/containers/ocicrypt/config" - enchelpers "github.com/containers/ocicrypt/helpers" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" @@ -43,7 +42,7 @@ func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (* // Wrapper around libimage.NormalizePlatform to return and consume // v1.Platform instead of independent os, arch and variant. func NormalizePlatform(platform v1.Platform) v1.Platform { - os, arch, variant := libimage.NormalizePlatform(platform.OS, platform.Architecture, platform.Variant) + os, arch, variant := lplatform.Normalize(platform.OS, platform.Architecture, platform.Variant) return v1.Platform{ OS: os, Architecture: arch, @@ -51,14 +50,6 @@ func NormalizePlatform(platform v1.Platform) v1.Platform { } } -// GetTempDir returns base for a temporary directory on host. -func GetTempDir() string { - if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { - return tmpdir - } - return "/var/tmp" -} - // ExportFromReader reads bytes from given reader and exports to external tar, directory or stdout. 
func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { var err error @@ -106,49 +97,3 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { } return nil } - -// DecryptConfig translates decryptionKeys into a DescriptionConfig structure -func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) { - decryptConfig := &encconfig.DecryptConfig{} - if len(decryptionKeys) > 0 { - // decryption - dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys) - if err != nil { - return nil, fmt.Errorf("invalid decryption keys: %w", err) - } - cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc}) - decryptConfig = cc.DecryptConfig - } - - return decryptConfig, nil -} - -// EncryptConfig translates encryptionKeys into a EncriptionsConfig structure -func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) { - var encLayers *[]int - var encConfig *encconfig.EncryptConfig - - if len(encryptionKeys) > 0 { - // encryption - encLayers = &encryptLayers - ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{}) - if err != nil { - return nil, nil, fmt.Errorf("invalid encryption keys: %w", err) - } - cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc}) - encConfig = cc.EncryptConfig - } - return encConfig, encLayers, nil -} - -// GetFormat translates format string into either docker or OCI format constant -func GetFormat(format string) (string, error) { - switch format { - case define.OCI: - return define.OCIv1ImageManifest, nil - case define.DOCKER: - return define.Dockerv2ImageManifest, nil - default: - return "", fmt.Errorf("unrecognized image type %q", format) - } -} diff --git a/vendor/github.com/containers/buildah/internal/volumes/volumes.go b/vendor/github.com/containers/buildah/internal/volumes/volumes.go new file mode 100644 index 00000000..fd1ff7f9 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/volumes/volumes.go @@ -0,0 +1,642 @@ +package volumes + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "errors" + + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/internal/tmpdir" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" + specs "github.com/opencontainers/runtime-spec/specs-go" + selinux "github.com/opencontainers/selinux/go-selinux" +) + +const ( + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // TypeCache is the type for mounting a common persistent cache from host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. 
+ // Lifecycle of following directory will be inherited from how host machine treats temporary directory + buildahCacheDir = "buildah-cache" + // mount=type=cache allows users to lock a cache store while it's being used by another build + BuildahCacheLockfile = "buildah-cache-lockfile" + // All the lockfiles are stored in a separate directory inside `buildahCacheDir` + // Example `/var/tmp/buildah-cache//buildah-cache-lockfile` + BuildahCacheLockfileDir = "buildah-cache-lockfiles" +) + +var ( + errBadMntOption = errors.New("invalid mount option") + errBadOptionArg = errors.New("must provide an argument for option") + errBadVolDest = errors.New("must set volume destination") + errBadVolSrc = errors.New("must set volume source") + errDuplicateDest = errors.New("duplicate mount destination") +) + +// CacheParent returns a cache parent for --mount=type=cache +func CacheParent() string { + return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) +} + +// GetBindMount parses a single bind mount entry from the --mount flag. +// Returns the specified mount, and the name of the image we mounted if we mounted one; otherwise it's empty. +// Caller is expected to unmount any mounted images +func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) { + newMount := specs.Mount{ + Type: define.TypeBind, + } + + setRelabel := false + mountReadability := false + setDest := false + bindNonRecursive := false + fromImage := "" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "type": + // This is already processed + continue + case "bind-nonrecursive": + newMount.Options = append(newMount.Options, "bind") + bindNonRecursive = true + case "ro", "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?)
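+ // Note: any of these options counts as an explicit readability choice, which suppresses + // the default "ro" that is otherwise appended after the option loop.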
+ newMount.Options = append(newMount.Options, kv[0]) + mountReadability = true + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + mountReadability = true + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + mountReadability = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + case "from": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + fromImage = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, kv[1]) + case "src", "source": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Source = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + targetPath := kv[1] + if !path.IsAbs(targetPath) { + targetPath = filepath.Join(workDir, targetPath) + } + if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { + return newMount, "", err + } + newMount.Destination = targetPath + setDest = true + case "relabel": + if setRelabel { + return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg) + } + setRelabel = true + if len(kv) != 2 { + return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) + } + switch kv[1] { + case "private": + newMount.Options = append(newMount.Options, "Z") + case "shared": + newMount.Options = append(newMount.Options, "z") + default: + return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) + } + case "consistency": + // Option for OS X only, has no meaning on other platforms + // and can thus be safely ignored. 
+ // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts + default: + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) + } + } + + // default mount readability is always readonly + if !mountReadability { + newMount.Options = append(newMount.Options, "ro") + } + + // The following variable ensures that we return the image name only if we did an additional mount + isImageMounted := false + if fromImage != "" { + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromImage]; ok { + mountPoint = val.MountPoint + } + } + // if mountPoint of image was not found in additionalMap + // or additionalMap was nil, try mounting image + if mountPoint == "" { + image, err := internalUtil.LookupImage(ctx, store, fromImage) + if err != nil { + return newMount, "", err + } + + mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) + if err != nil { + return newMount, "", err + } + isImageMounted = true + } + contextDir = mountPoint + } + + // buildkit parity: default bind option must be `rbind` + // unless specified + if !bindNonRecursive { + newMount.Options = append(newMount.Options, "rbind") + } + + if !setDest { + return newMount, fromImage, errBadVolDest + } + + // buildkit parity: support absolute path for sources from current build context + if contextDir != "" { + // path should be /contextDir/specified path + evaluated, err := copier.Eval(contextDir, newMount.Source, copier.EvalOptions{}) + if err != nil { + return newMount, "", err + } + newMount.Source = evaluated + } else { + // looks like it's coming from `build run --mount=type=bind` allow using absolute path + // error out if no source is set + if newMount.Source == "" { + return newMount, "", errBadVolSrc + } + if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { + return newMount, "", err + } + } + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, fromImage, err + } + newMount.Options = opts + + if !isImageMounted { + // we don't want any cleanups if image was not mounted explicitly + // so don't return anything + fromImage = "" + } + + return newMount, fromImage, nil +} + +// GetCacheMount parses a single cache mount entry from the --mount flag. +// +// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (existing callers do so once the RUN step that uses the mount has finished). +func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) { + var err error + var mode uint64 + var buildahLockFilesDir string + var ( + setDest bool + setShared bool + setReadOnly bool + foundSElinuxLabel bool + ) + fromStage := "" + newMount := specs.Mount{ + Type: define.TypeBind, + } + // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id + id := "" + // buildkit parity: cache directory defaults to 755 + mode = 0o755 + // buildkit parity: cache directory defaults to uid 0 if not specified + uid := 0 + // buildkit parity: cache directory defaults to gid 0 if not specified + gid := 0 + // sharing mode + sharing := "shared" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "type": + // This is already processed + continue + case "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?)
+ newMount.Options = append(newMount.Options, kv[0]) + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + case "readonly", "ro": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + setReadOnly = true + case "Z", "z": + newMount.Options = append(newMount.Options, kv[0]) + foundSElinuxLabel = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U": + newMount.Options = append(newMount.Options, kv[0]) + setShared = true + case "sharing": + sharing = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, kv[1]) + case "id": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + id = kv[1] + case "from": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + fromStage = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + targetPath := kv[1] + if !path.IsAbs(targetPath) { + targetPath = filepath.Join(workDir, targetPath) + } + if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { + return newMount, nil, err + } + newMount.Destination = targetPath + setDest = true + case "src", "source": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Source = kv[1] + case "mode": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + mode, err = strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err) + } + case "uid": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + uid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err) + } + case "gid": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + gid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err) + } + default: + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + } + } + + // If selinux is enabled and no selinux option was configured + // default to `z` i.e shared content label. 
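+ // Stage-backed caches (fromStage != "") skip the label, since they reuse + // an existing stage mount point rather than a host directory created below.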
+ if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" { + newMount.Options = append(newMount.Options, "z") + } + + if !setDest { + return newMount, nil, errBadVolDest + } + + if fromStage != "" { + // do not create cache on host + // instead use read-only mounted stage as cache + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromStage]; ok { + if val.IsStage { + mountPoint = val.MountPoint + } + } + } + // Cache does not support using an image, so if no stage is found, + // return an error + if mountPoint == "" { + return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage) + } + // path should be /contextDir/specified path + newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // we need to create cache on host if no image is being used + + // since type is cache and cache can be reused by consecutive builds + // create a common cache directory, which persists on hosts within temp lifecycle + // add subdirectory if specified + + // cache parent directory: creates separate cache parent for each user. + cacheParent := CacheParent() + // create cache on host if not present + err = os.MkdirAll(cacheParent, os.FileMode(0755)) + if err != nil { + return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err) + } + + if id != "" { + newMount.Source = filepath.Join(cacheParent, filepath.Clean(id)) + buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id)) + } else { + newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination)) + buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination)) + } + idPair := idtools.IDPair{ + UID: uid, + GID: gid, + } + // buildkit parity: change uid and gid if specified otherwise keep `0` + err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) + if err != nil { + return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err) + } + + // create a subdirectory inside `cacheParent` just to store lockfiles + buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir) + err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700)) + if err != nil { + return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err) + } + } + + var targetLock *lockfile.LockFile // = nil + succeeded := false + defer func() { + if !succeeded && targetLock != nil { + targetLock.Unlock() + } + }() + switch sharing { + case "locked": + // lock parent cache + lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile)) + if err != nil { + return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err) + } + // Will be unlocked after the RUN step is executed.
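+ // (If a later step in this function fails instead, the deferred cleanup + // above releases the lock.)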
+ lockfile.Lock() + targetLock = lockfile + case "shared": + // do nothing since default is `shared` + break + default: + // error out for unknown values + return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`", sharing) + } + + // buildkit parity: default sharing should be shared + // unless specified + if !setShared { + newMount.Options = append(newMount.Options, "shared") + } + + // buildkit parity: cache must be writable unless `ro` or `readonly` is configured explicitly + if !setReadOnly { + newMount.Options = append(newMount.Options, "rw") + } + + newMount.Options = append(newMount.Options, "bind") + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, nil, err + } + newMount.Options = opts + + succeeded = true + return newMount, targetLock, nil +} + +func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { + finalVolumeMounts := make(map[string]specs.Mount) + + for _, volume := range volumes { + volumeMount, err := internalParse.Volume(volume) + if err != nil { + return nil, err + } + if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { + return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest) + } + finalVolumeMounts[volumeMount.Destination] = volumeMount + } + return finalVolumeMounts, nil +} + +// UnlockLockArray is a helper for cleaning up after GetVolumes and the like. +func UnlockLockArray(locks []*lockfile.LockFile) { + for _, lock := range locks { + lock.Unlock() + } +} + +// GetVolumes gets the volumes from --volume and --mount +// +// If this function succeeds, the caller must unlock the returned *lockfile.LockFiles, if any (existing callers unlock them once the RUN step using the mounts has finished). +func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) { + unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir) + if err != nil { + return nil, mountedImages, nil, err + } + succeeded := false + defer func() { + if !succeeded { + UnlockLockArray(targetLocks) + } + }() + volumeMounts, err := getVolumeMounts(volumes) + if err != nil { + return nil, mountedImages, nil, err + } + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest) + } + unifiedMounts[dest] = mount + } + + finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + finalMounts = append(finalMounts, mount) + } + succeeded = true + return finalMounts, mountedImages, targetLocks, nil +} + +// getMounts takes user-provided input from the --mount flag and creates OCI +// spec mounts. +// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=tmpfs,target=/dev/shm ... +// +// If this function succeeds, the caller must unlock the returned *lockfile.LockFiles, if any (existing callers unlock them once the RUN step using the mounts has finished).
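+// Each --mount argument is a comma-separated list of key=value tokens; the +// type= token selects the parser (bind, cache, or tmpfs) applied to the rest.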
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) { + // If `type` is not set default to "bind" + mountType := define.TypeBind + finalMounts := make(map[string]specs.Mount) + mountedImages := make([]string, 0) + targetLocks := make([]*lockfile.LockFile, 0) + succeeded := false + defer func() { + if !succeeded { + UnlockLockArray(targetLocks) + } + }() + + errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=,[src=,]target=[,options]") + + // TODO(vrothberg): the manual parsing can be replaced with a regular expression + // to allow a more robust parsing of the mount format and to give + // precise errors regarding supported format versus supported options. + for _, mount := range mounts { + tokens := strings.Split(mount, ",") + if len(tokens) < 2 { + return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax) + } + for _, field := range tokens { + if strings.HasPrefix(field, "type=") { + kv := strings.Split(field, "=") + if len(kv) != 2 { + return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax) + } + mountType = kv[1] + } + } + switch mountType { + case define.TypeBind: + mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir) + if err != nil { + return nil, mountedImages, nil, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) + } + finalMounts[mount.Destination] = mount + mountedImages = append(mountedImages, image) + case TypeCache: + mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir) + if err != nil { + return nil, mountedImages, nil, err + } + if tl != nil { + targetLocks = append(targetLocks, tl) + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) + } + finalMounts[mount.Destination] = mount + case TypeTmpfs: + mount, err := GetTmpfsMount(tokens) + if err != nil { + return nil, mountedImages, nil, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) + } + finalMounts[mount.Destination] = mount + default: + return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType) + } + } + + succeeded = true + return finalMounts, mountedImages, targetLocks, nil +} + +// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag +func GetTmpfsMount(args []string) (specs.Mount, error) { + newMount := specs.Mount{ + Type: TypeTmpfs, + Source: TypeTmpfs, + } + + setDest := false + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "type": + // This is already processed + continue + case "ro", "nosuid", "nodev", "noexec": + newMount.Options = append(newMount.Options, kv[0]) + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + case "tmpcopyup": + //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. 
+ newMount.Options = append(newMount.Options, kv[0]) + case "tmpfs-mode": + if len(kv) == 1 { + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) + case "tmpfs-size": + if len(kv) == 1 { + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) + case "src", "source": + return newMount, errors.New("source is not supported with tmpfs mounts") + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, err + } + newMount.Destination = kv[1] + setDest = true + default: + return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + } + } + + if !setDest { + return newMount, errBadVolDest + } + + return newMount, nil +} diff --git a/vendor/github.com/containers/buildah/libdm_tag.sh b/vendor/github.com/containers/buildah/libdm_tag.sh deleted file mode 100644 index 815b5d91..00000000 --- a/vendor/github.com/containers/buildah/libdm_tag.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -tmpdir="$PWD/tmp.$RANDOM" -mkdir -p "$tmpdir" -trap 'rm -fr "$tmpdir"' EXIT -${CC:-cc} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS} -o "$tmpdir"/libdm_tag -x c - -ldevmapper > /dev/null 2> /dev/null << EOF -#include -int main() { - struct dm_task *task; - dm_task_deferred_remove(task); - return 0; -} -EOF -if test $? -ne 0 ; then - echo libdm_no_deferred_remove -fi diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index d2788505..45269c81 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -140,7 +140,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) - if options.FromImage != "" && options.FromImage != "scratch" { + if options.FromImage != "" && options.FromImage != BaseImageFakeName { imageRuntime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) if err != nil { return nil, err @@ -239,7 +239,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions tmpName = findUnusedContainer(tmpName, containers) } - conflict := 100 + suffixDigitsModulo := 100 for { var flags map[string]interface{} @@ -265,8 +265,10 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions if !errors.Is(err, storage.ErrDuplicateName) || options.Container != "" { return nil, fmt.Errorf("creating container: %w", err) } - tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict) - conflict = conflict * 10 + tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%suffixDigitsModulo) + if suffixDigitsModulo < 1_000_000_000 { + suffixDigitsModulo *= 10 + } } defer func() { if err != nil { diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go index b58f5a42..0ccaf8a6 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go @@ -92,6 +92,19 @@ type lookupGroupEntry struct { user string } +func scanWithoutComments(rc *bufio.Scanner) (string, bool) { + for { + if !rc.Scan() { + return "", false + } + line := rc.Text() + if 
strings.HasPrefix(strings.TrimSpace(line), "#") { + continue + } + return line, true + } +} + func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry { if !rc.Scan() { return nil @@ -118,10 +131,13 @@ func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry { } func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry { - if !rc.Scan() { + // On FreeBSD, /etc/group may contain comments: + // https://man.freebsd.org/cgi/man.cgi?query=group&sektion=5&format=html + // We need to ignore those lines rather than trying to parse them. + line, ok := scanWithoutComments(rc) + if !ok { return nil } - line := rc.Text() fields := strings.Split(line, ":") if len(fields) != 4 { return nil diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 81810d28..e416ecd7 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -6,7 +6,6 @@ import ( "os/exec" "path/filepath" "strings" - "syscall" "errors" @@ -146,74 +145,6 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error return nil } -// MountWithOptions creates a subdir of the contentDir based on the source directory -// from the source system. It then mounts up the source directory on to the -// generated mount point and returns the mount point to the caller. -// But allows api to set custom workdir, upperdir and other overlay options -// Following API is being used by podman at the moment -func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { - mergeDir := filepath.Join(contentDir, "merge") - - // Create overlay mount options for rw/ro. - var overlayOptions string - if opts.ReadOnly { - // Read-only overlay mounts require two lower layer. - lowerTwo := filepath.Join(contentDir, "lower") - if err := os.Mkdir(lowerTwo, 0755); err != nil { - return mount, err - } - overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) - } else { - // Read-write overlay mounts want a lower, upper and a work layer. - workDir := filepath.Join(contentDir, "work") - upperDir := filepath.Join(contentDir, "upper") - - if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { - workDir = opts.WorkDirOptionFragment - upperDir = opts.UpperDirOptionFragment - } - - st, err := os.Stat(source) - if err != nil { - return mount, err - } - if err := os.Chmod(upperDir, st.Mode()); err != nil { - return mount, err - } - if stat, ok := st.Sys().(*syscall.Stat_t); ok { - if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { - return mount, err - } - } - overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) - } - - mountProgram := findMountProgram(opts.GraphOpts) - if mountProgram != "" { - if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { - return mount, err - } - - mount.Source = mergeDir - mount.Destination = dest - mount.Type = "bind" - mount.Options = []string{"bind", "slave"} - return mount, nil - } - - if unshare.IsRootless() { - /* If a mount_program is not specified, fallback to try mounting native overlay. 
*/ - overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) - } - - mount.Source = mergeDir - mount.Destination = dest - mount.Type = "overlay" - mount.Options = strings.Split(overlayOptions, ",") - - return mount, nil -} - // Convert ":" to "\:", the path which will be overlay mounted need to be escaped func escapeColon(source string) string { return strings.ReplaceAll(source, ":", "\\:") diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go new file mode 100644 index 00000000..e814a327 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go @@ -0,0 +1,31 @@ +package overlay + +import ( + //"fmt" + //"os" + //"path/filepath" + //"strings" + //"syscall" + "errors" + + //"github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + if opts.ReadOnly { + // Read-only overlay mounts can be simulated with nullfs + mount.Source = source + mount.Destination = dest + mount.Type = "nullfs" + mount.Options = []string{"ro"} + return mount, nil + } else { + return mount, errors.New("read/write overlay mounts not supported on freebsd") + } +} diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go new file mode 100644 index 00000000..9bd72bc2 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go @@ -0,0 +1,80 @@ +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + mergeDir := filepath.Join(contentDir, "merge") + + // Create overlay mount options for rw/ro. + var overlayOptions string + if opts.ReadOnly { + // Read-only overlay mounts require two lower layer. + lowerTwo := filepath.Join(contentDir, "lower") + if err := os.Mkdir(lowerTwo, 0755); err != nil { + return mount, err + } + overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) + } else { + // Read-write overlay mounts want a lower, upper and a work layer. 
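+ // The option string assembled below has the form + // "lowerdir=<source>,upperdir=<upper>,workdir=<work>,private".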
+ workDir := filepath.Join(contentDir, "work") + upperDir := filepath.Join(contentDir, "upper") + + if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { + workDir = opts.WorkDirOptionFragment + upperDir = opts.UpperDirOptionFragment + } + + st, err := os.Stat(source) + if err != nil { + return mount, err + } + if err := os.Chmod(upperDir, st.Mode()); err != nil { + return mount, err + } + if stat, ok := st.Sys().(*syscall.Stat_t); ok { + if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { + return mount, err + } + } + overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) + } + + mountProgram := findMountProgram(opts.GraphOpts) + if mountProgram != "" { + if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { + return mount, err + } + + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "bind" + mount.Options = []string{"bind", "slave"} + return mount, nil + } + + if unshare.IsRootless() { + /* If a mount_program is not specified, fallback to try mounting native overlay. */ + overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) + } + + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "overlay" + mount.Options = strings.Split(overlayOptions, ",") + + return mount, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 8d02f59d..d865f504 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -16,8 +16,11 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" + mkcwtypes "github.com/containers/buildah/internal/mkcw/types" internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/internal/tmpdir" "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/parse" "github.com/containers/image/v5/docker/reference" @@ -67,11 +70,6 @@ func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) { return result, nil } -// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it. 
-func CleanCacheMount() error { - return internalParse.CleanCacheMount() -} - // CommonBuildOptions parses the build options from the bud cli func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) @@ -106,6 +104,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name } } + noHostname, _ := flags.GetBool("no-hostname") noHosts, _ := flags.GetBool("no-hosts") addHost, _ := flags.GetStringSlice("add-host") @@ -154,9 +153,6 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name return nil, fmt.Errorf("invalid --shm-size: %w", err) } volumes, _ := flags.GetStringArray("volume") - if err := Volumes(volumes); err != nil { - return nil, err - } cpuPeriod, _ := flags.GetUint64("cpu-period") cpuQuota, _ := flags.GetInt64("cpu-quota") cpuShares, _ := flags.GetUint64("cpu-shares") @@ -188,6 +184,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name IdentityLabel: types.NewOptionalBool(identityLabel), Memory: memoryLimit, MemorySwap: memorySwap, + NoHostname: noHostname, NoHosts: noHosts, OmitHistory: omitHistory, ShmSize: findFlagFunc("shm-size").Value.String(), @@ -451,9 +448,13 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin func getAuthFile(authfile string) string { if authfile != "" { - return authfile + absAuthfile, err := filepath.Abs(authfile) + if err == nil { + return absAuthfile + } + logrus.Warnf("ignoring passed-in auth file path, evaluating it: %v", err) } - return os.Getenv("REGISTRY_AUTH_FILE") + return auth.GetDefaultAuthFile() } // PlatformFromOptions parses the operating system (os) and architecture (arch) @@ -510,8 +511,6 @@ func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Varia return platforms, nil } -const platformSep = "/" - // DefaultPlatform returns the standard platform for the current system func DefaultPlatform() string { return platforms.DefaultString() @@ -520,21 +519,15 @@ func DefaultPlatform() string { // Platform separates the platform string into os, arch and variant, // accepting any of $arch, $os/$arch, or $os/$arch/$variant. 
func Platform(platform string) (os, arch, variant string, err error) {
- split := strings.Split(platform, platformSep)
- switch len(split) {
- case 3:
- variant = split[2]
- fallthrough
- case 2:
- arch = split[1]
- os = split[0]
- return
- case 1:
- if platform == "local" {
- return Platform(DefaultPlatform())
- }
+ platform = strings.Trim(platform, "/")
+ if platform == "local" || platform == "" {
+ return Platform(DefaultPlatform())
+ }
+ platformSpec, err := platforms.Parse(platform)
+ if err != nil {
+ return "", "", "", fmt.Errorf("invalid platform syntax for --platform=%q: %w", platform, err)
 }
- return "", "", "", fmt.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform)
+ return platformSpec.OS, platformSpec.Architecture, platformSpec.Variant, nil
}

func parseCreds(creds string) (string, string) {
@@ -641,6 +634,81 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
 return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil
}

+// TeeType parses a string value and returns a TeeType
+func TeeType(teeType string) define.TeeType {
+ return define.TeeType(strings.ToLower(teeType))
+}
+
+// GetConfidentialWorkloadOptions parses a confidential workload settings
+// argument, which controls both whether or not we produce an image that
+// expects to be run using krun, and how we handle things like encrypting
+// the disk image that the container image will contain.
+func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOptions, error) {
+ options := define.ConfidentialWorkloadOptions{
+ TempDir: GetTempDir(),
+ }
+ defaults := options
+ for _, option := range strings.Split(arg, ",") {
+ var err error
+ switch {
+ case strings.HasPrefix(option, "type="):
+ options.TeeType = TeeType(strings.TrimPrefix(option, "type="))
+ switch options.TeeType {
+ case define.SEV, define.SNP, mkcwtypes.SEV_NO_ES:
+ default:
+ return options, fmt.Errorf("parsing type= value %q: unrecognized value", options.TeeType)
+ }
+ case strings.HasPrefix(option, "attestation_url="), strings.HasPrefix(option, "attestation-url="):
+ options.Convert = true
+ options.AttestationURL = strings.TrimPrefix(option, "attestation_url=")
+ if options.AttestationURL == option {
+ options.AttestationURL = strings.TrimPrefix(option, "attestation-url=")
+ }
+ case strings.HasPrefix(option, "passphrase="):
+ options.Convert = true
+ options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=")
+ case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="):
+ options.WorkloadID = strings.TrimPrefix(option, "workload_id=")
+ if options.WorkloadID == option {
+ options.WorkloadID = strings.TrimPrefix(option, "workload-id=")
+ }
+ case strings.HasPrefix(option, "cpus="):
+ options.CPUs, err = strconv.Atoi(strings.TrimPrefix(option, "cpus="))
+ if err != nil {
+ return options, fmt.Errorf("parsing cpus= value %q: %w", strings.TrimPrefix(option, "cpus="), err)
+ }
+ case strings.HasPrefix(option, "memory="):
+ options.Memory, err = strconv.Atoi(strings.TrimPrefix(option, "memory="))
+ if err != nil {
+ return options, fmt.Errorf("parsing memory= value %q: %w", strings.TrimPrefix(option, "memory="), err)
+ }
+ case option == "ignore_attestation_errors", option == "ignore-attestation-errors":
+ options.IgnoreAttestationErrors = true
+ case strings.HasPrefix(option, "ignore_attestation_errors="), strings.HasPrefix(option, "ignore-attestation-errors="):
+ val =
strings.TrimPrefix(option, "ignore_attestation_errors=") + if val == option { + val = strings.TrimPrefix(option, "ignore-attestation-errors=") + } + options.IgnoreAttestationErrors = val == "true" || val == "yes" || val == "on" || val == "1" + case strings.HasPrefix(option, "firmware-library="), strings.HasPrefix(option, "firmware_library="): + val := strings.TrimPrefix(option, "firmware-library=") + if val == option { + val = strings.TrimPrefix(option, "firmware_library=") + } + options.FirmwareLibrary = val + case strings.HasPrefix(option, "slop="): + options.Slop = strings.TrimPrefix(option, "slop=") + default: + knownOptions := []string{"type", "attestation_url", "passphrase", "workload_id", "cpus", "memory", "firmware_library", "slop"} + return options, fmt.Errorf("expected one or more of %q as arguments for --cw, not %q", knownOptions, option) + } + } + if options != defaults && !options.Convert { + return options, fmt.Errorf("--cw arguments missing one or more of (%q, %q)", "passphrase", "attestation_url") + } + return options, nil +} + // IDMappingOptions parses the build options related to user namespaces and ID mapping. func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) @@ -1002,11 +1070,9 @@ func isValidDeviceMode(mode string) bool { return true } +// GetTempDir returns the path of the preferred temporary directory on the host. func GetTempDir() string { - if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { - return tmpdir - } - return "/var/tmp" + return tmpdir.GetTempDir() } // Secrets parses the --secret flag diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index b1cb7518..ec28482b 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -11,6 +11,7 @@ import ( "sync" "time" + "github.com/containers/buildah/internal/tmpdir" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" @@ -79,7 +80,7 @@ func (a *AgentServer) Serve(processLabel string) (string, error) { if err != nil { return "", err } - serveDir, err := os.MkdirTemp("", ".buildah-ssh-sock") + serveDir, err := os.MkdirTemp(tmpdir.GetTempDir(), ".buildah-ssh-sock") if err != nil { return "", err } @@ -201,8 +202,13 @@ func NewSource(paths []string) (*Source, error) { if len(paths) == 0 { socket = os.Getenv("SSH_AUTH_SOCK") if socket == "" { - return nil, errors.New("$SSH_AUTH_SOCK not set") + return nil, errors.New("SSH_AUTH_SOCK not set in environment") } + absSocket, err := filepath.Abs(socket) + if err != nil { + return nil, fmt.Errorf("evaluating SSH_AUTH_SOCK in environment: %w", err) + } + socket = absSocket } for _, p := range paths { if socket != "" { diff --git a/vendor/github.com/containers/buildah/pkg/util/resource_unix.go b/vendor/github.com/containers/buildah/pkg/util/resource_unix.go new file mode 100644 index 00000000..4f7c08cf --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/resource_unix.go @@ -0,0 +1,38 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package util + +import ( + "fmt" + "syscall" + + "github.com/docker/go-units" +) + +func ParseUlimit(ulimit string) (*units.Ulimit, error) { + ul, err := units.ParseUlimit(ulimit) + if err != nil { + return nil, 
fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err) + } + + if ul.Hard != -1 && ul.Soft == -1 { + return ul, nil + } + + rl, err := ul.GetRlimit() + if err != nil { + return nil, err + } + var limit syscall.Rlimit + if err := syscall.Getrlimit(rl.Type, &limit); err != nil { + return nil, err + } + if ul.Soft == -1 { + ul.Soft = int64(limit.Cur) + } + if ul.Hard == -1 { + ul.Hard = int64(limit.Max) + } + return ul, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/resource_windows.go b/vendor/github.com/containers/buildah/pkg/util/resource_windows.go new file mode 100644 index 00000000..37170918 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/resource_windows.go @@ -0,0 +1,16 @@ +package util + +import ( + "fmt" + + "github.com/docker/go-units" +) + +func ParseUlimit(ulimit string) (*units.Ulimit, error) { + ul, err := units.ParseUlimit(ulimit) + if err != nil { + return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err) + } + + return ul, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/util.go b/vendor/github.com/containers/buildah/pkg/util/util.go index 6bb20219..17ad3605 100644 --- a/vendor/github.com/containers/buildah/pkg/util/util.go +++ b/vendor/github.com/containers/buildah/pkg/util/util.go @@ -5,6 +5,8 @@ import ( "os" "path/filepath" "strings" + + "github.com/containers/buildah/pkg/parse" ) // Mirrors path to a tmpfile if path points to a @@ -17,7 +19,7 @@ import ( func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) { // one use-case is discussed here // https://github.com/containers/buildah/issues/3070 - if !strings.HasPrefix(file, "/dev/fd") { + if !strings.HasPrefix(file, "/dev/fd/") { return file, false } b, err := os.ReadFile(file) @@ -25,10 +27,11 @@ func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) { // if anything goes wrong return original path return file, false } - tmpfile, err := os.CreateTemp(os.TempDir(), "buildah-temp-file") + tmpfile, err := os.CreateTemp(parse.GetTempDir(), "buildah-temp-file") if err != nil { return file, false } + defer tmpfile.Close() if _, err := tmpfile.Write(b); err != nil { // if anything goes wrong return original path return file, false diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go index 1f443914..2e2b9498 100644 --- a/vendor/github.com/containers/buildah/push.go +++ b/vendor/github.com/containers/buildah/push.go @@ -95,6 +95,10 @@ type PushOptions struct { CompressionFormat *compression.Algorithm // CompressionLevel specifies what compression level is used CompressionLevel *int + // ForceCompressionFormat ensures that the compression algorithm set in + // CompressionFormat is used exclusively, and blobs of other compression + // algorithms are not reused. + ForceCompressionFormat bool } // Push copies the contents of the image to a new location. 
@@ -110,6 +114,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options libimageOptions.OciEncryptLayers = options.OciEncryptLayers libimageOptions.CompressionFormat = options.CompressionFormat libimageOptions.CompressionLevel = options.CompressionLevel + libimageOptions.ForceCompressionFormat = options.ForceCompressionFormat libimageOptions.PolicyAllowStorage = true if options.Quiet { diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 4473bef0..77887dfe 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -10,7 +10,6 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/lockfile" "github.com/opencontainers/runtime-spec/specs-go" - spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -87,7 +86,9 @@ type RunOptions struct { Runtime string // Args adds global arguments for the runtime. Args []string - // NoHosts use the images /etc/hosts file + // NoHostname won't create new /etc/hostname file + NoHostname bool + // NoHosts won't create new /etc/hosts file NoHosts bool // NoPivot adds the --no-pivot runtime flag. NoPivot bool @@ -199,8 +200,8 @@ type runMountInfo struct { // IDMaps are the UIDs, GID, and maps for the run type IDMaps struct { - uidmap []spec.LinuxIDMapping - gidmap []spec.LinuxIDMapping + uidmap []specs.LinuxIDMapping + gidmap []specs.LinuxIDMapping rootUID int rootGID int processUID int diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go index bcfd70e1..93550f62 100644 --- a/vendor/github.com/containers/buildah/run_common.go +++ b/vendor/github.com/containers/buildah/run_common.go @@ -26,8 +26,8 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" - internalParse "github.com/containers/buildah/internal/parse" internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/internal/volumes" "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" @@ -35,6 +35,7 @@ import ( "github.com/containers/common/libnetwork/network" "github.com/containers/common/libnetwork/resolvconf" netTypes "github.com/containers/common/libnetwork/types" + netUtil "github.com/containers/common/libnetwork/util" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/subscriptions" imageTypes "github.com/containers/image/v5/types" @@ -47,7 +48,6 @@ import ( storageTypes "github.com/containers/storage/types" "github.com/opencontainers/go-digest" "github.com/opencontainers/runtime-spec/specs-go" - spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -62,8 +62,8 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe return "", fmt.Errorf("failed to get config: %w", err) } - nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers)+len(dnsServers)) - nameservers = append(nameservers, defaultConfig.Containers.DNSServers...) + nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers.Get())+len(dnsServers)) + nameservers = append(nameservers, defaultConfig.Containers.DNSServers.Get()...) nameservers = append(nameservers, dnsServers...) 
keepHostServers := false @@ -79,12 +79,12 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe } } - searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches)+len(dnsSearch)) - searches = append(searches, defaultConfig.Containers.DNSSearches...) + searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches.Get())+len(dnsSearch)) + searches = append(searches, defaultConfig.Containers.DNSSearches.Get()...) searches = append(searches, dnsSearch...) - options := make([]string, 0, len(defaultConfig.Containers.DNSOptions)+len(dnsOptions)) - options = append(options, defaultConfig.Containers.DNSOptions...) + options := make([]string, 0, len(defaultConfig.Containers.DNSOptions.Get())+len(dnsOptions)) + options = append(options, defaultConfig.Containers.DNSOptions.Get()...) options = append(options, dnsOptions...) cfile := filepath.Join(rdir, "resolv.conf") @@ -117,7 +117,7 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe } // generateHosts creates a containers hosts file -func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) { +func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string, spec *specs.Spec) (string, error) { conf, err := config.Default() if err != nil { return "", err @@ -128,12 +128,34 @@ func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoo return "", err } + var entries etchosts.HostEntries + isHost := true + if spec.Linux != nil { + for _, ns := range spec.Linux.Namespaces { + if ns.Type == specs.NetworkNamespace { + isHost = false + break + } + } + } + // add host entry for local ip when running in host network + if spec.Hostname != "" && isHost { + ip := netUtil.GetLocalIP() + if ip != "" { + entries = append(entries, etchosts.HostEntry{ + Names: []string{spec.Hostname}, + IP: ip, + }) + } + } + targetfile := filepath.Join(rdir, "hosts") if err := etchosts.New(&etchosts.Params{ BaseFile: path, ExtraHosts: b.CommonBuildOpts.AddHost, HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil), TargetFile: targetfile, + ContainerIPs: entries, }); err != nil { return "", err } @@ -322,7 +344,7 @@ func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) } if len(cniPluginPath) > 0 { plugins := strings.Split(cniPluginPath, string(os.PathListSeparator)) - newconf.Network.CNIPluginDirs = plugins + newconf.Network.CNIPluginDirs.Set(plugins) } _, netInt, err := network.NetworkBackend(store, &newconf, false) @@ -368,6 +390,9 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) { return fmt.Errorf("not allowed to mix host PID namespace with container user namespace") } + case IsolationChroot: + logrus.Info("network namespace isolation not supported with chroot isolation, forcing host network") + options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true}) } return nil } @@ -398,15 +423,6 @@ func waitForSync(pipeR *os.File) error { return err } -func contains(volumes []string, v string) bool { - for _, i := range volumes { - if i == v { - return true - } - } - return false -} - func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, 
err error) {
 if options.Logger == nil {
@@ -1105,8 +1121,12 @@ func runUsingRuntimeMain() {
 os.Exit(1)
 }
-func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks,
+func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool,
 networkString string, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) {
+ // Lock the caller to a single OS-level thread.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
 var confwg sync.WaitGroup
 config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
 Options: options,
@@ -1207,7 +1227,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 return fmt.Errorf("parsing pid %s as a number: %w", string(pidValue), err)
 }
- teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
+ teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, networkString, containerName)
 if teardown != nil {
 defer teardown()
 }
@@ -1217,13 +1237,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 // only add hosts if we manage the hosts file
 if hostsFile != "" {
- var entries etchosts.HostEntries
- if netstatus != nil {
- entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
- } else {
- // we have slirp4netns, default to slirp4netns ip since this is not configurable in buildah
- entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}}
- }
+ entries := etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
 // make sure to sync this with (b *Builder) generateHosts()
 err = etchosts.Add(hostsFile, entries)
 if err != nil {
@@ -1328,14 +1342,14 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 processGID: int(processGID),
 }
 // Get the list of mounts that are just for this Run() call.
- runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps)
+ runMounts, mountArtifacts, err := b.runSetupRunMounts(mountPoint, runFileMounts, runMountInfo, idMaps)
 if err != nil {
 return nil, err
 }
 succeeded := false
 defer func() {
 if !succeeded {
- internalParse.UnlockLockArray(mountArtifacts.TargetLocks)
+ volumes.UnlockLockArray(mountArtifacts.TargetLocks)
 }
 }()
 // Add temporary copies of the contents of volume locations at the
@@ -1444,7 +1458,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
 }
 // Destinations which can be cleaned up after every RUN
-func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
+func cleanableDestinationListFromMounts(mounts []specs.Mount) []string {
 mountDest := []string{}
 for _, mount := range mounts {
 // Add all destination to mountArtifacts so that they can be cleaned up later
@@ -1464,12 +1478,28 @@ func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
 return mountDest
 }
+func checkIfMountDestinationPreExists(root string, dest string) (bool, error) {
+ statResults, err := copier.Stat(root, "", copier.StatOptions{}, []string{dest})
+ if err != nil {
+ return false, err
+ }
+ if len(statResults) > 0 {
+ // We created an exact path for globbing, so it will
+ // return only one result.
+ if statResults[0].Error != "" && len(statResults[0].Globbed) == 0 {
+ // Path does not exist.
+ return false, nil + } + // Path exists. + return true, nil + } + return false, nil +} + // runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs // // If this function succeeds, the caller must unlock runMountArtifacts.TargetLocks (when??) -func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) { - // If `type` is not set default to TypeBind - mountType := define.TypeBind +func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]specs.Mount, *runMountArtifacts, error) { mountTargets := make([]string, 0, 10) tmpFiles := make([]string, 0, len(mounts)) mountImages := make([]string, 0, 10) @@ -1481,11 +1511,20 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap succeeded := false defer func() { if !succeeded { - internalParse.UnlockLockArray(targetLocks) + volumes.UnlockLockArray(targetLocks) } }() for _, mount := range mounts { + var mountSpec *specs.Mount + var err error + var envFile, image string + var agent *sshagent.AgentServer + var tl *lockfile.LockFile tokens := strings.Split(mount, ",") + + // If `type` is not set default to TypeBind + mountType := define.TypeBind + for _, field := range tokens { if strings.HasPrefix(field, "type=") { kv := strings.Split(field, "=") @@ -1497,63 +1536,71 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap } switch mountType { case "secret": - mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps, sources.WorkDir) + mountSpec, envFile, err = b.getSecretMount(tokens, sources.Secrets, idMaps, sources.WorkDir) if err != nil { return nil, nil, err } - if mount != nil { - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) + if mountSpec != nil { + finalMounts = append(finalMounts, *mountSpec) if envFile != "" { tmpFiles = append(tmpFiles, envFile) } } case "ssh": - mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps) + mountSpec, agent, err = b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps) if err != nil { return nil, nil, err } - if mount != nil { - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) + if mountSpec != nil { + finalMounts = append(finalMounts, *mountSpec) agents = append(agents, agent) if sshCount == 0 { - defaultSSHSock = mount.Destination + defaultSSHSock = mountSpec.Destination } // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} sshCount++ } case define.TypeBind: - mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir) + mountSpec, image, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir) if err != nil { return nil, nil, err } - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) + finalMounts = append(finalMounts, *mountSpec) // only perform cleanup if image was mounted ignore everything else if image != "" { mountImages = append(mountImages, image) } case "tmpfs": - mount, err := b.getTmpfsMount(tokens, idMaps) + mountSpec, err = b.getTmpfsMount(tokens, idMaps) if err != nil { return nil, nil, err } - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) + 
finalMounts = append(finalMounts, *mountSpec)
 case "cache":
- mount, tl, err := b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
+ mountSpec, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
 if err != nil {
 return nil, nil, err
 }
- finalMounts = append(finalMounts, *mount)
- mountTargets = append(mountTargets, mount.Destination)
+ finalMounts = append(finalMounts, *mountSpec)
 if tl != nil {
 targetLocks = append(targetLocks, tl)
 }
 default:
 return nil, nil, fmt.Errorf("invalid mount type %q", mountType)
 }
+
+ if mountSpec != nil {
+ pathPreExists, err := checkIfMountDestinationPreExists(mountPoint, mountSpec.Destination)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !pathPreExists {
+ // In this case the path did not exist before we created
+ // any new mounts, therefore we must clean up the newly
+ // created directory after this step.
+ mountTargets = append(mountTargets, mountSpec.Destination)
+ }
+ }
 }
 succeeded = true
 artifacts := &runMountArtifacts{
@@ -1567,12 +1614,12 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
 return finalMounts, artifacts, nil
 }
-func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*spec.Mount, string, error) {
+func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, string, error) {
 if contextDir == "" {
 return nil, "", errors.New("Context Directory for current run invocation is not configured")
 }
 var optionMounts []specs.Mount
- mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir)
+ mount, image, err := volumes.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir)
 if err != nil {
 return nil, image, err
 }
@@ -1584,9 +1631,9 @@ func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContex
 return &volumes[0], image, nil
 }
-func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, error) {
+func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) {
 var optionMounts []specs.Mount
- mount, err := internalParse.GetTmpfsMount(tokens)
+ mount, err := volumes.GetTmpfsMount(tokens)
 if err != nil {
 return nil, err
 }
@@ -1598,7 +1645,7 @@ func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, er
 return &volumes[0], nil
 }
-func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps, workdir string) (*spec.Mount, string, error) {
+func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps, workdir string) (*specs.Mount, string, error) {
 errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
 if len(tokens) == 0 {
 return nil, "", errInvalidSyntax
@@ -1622,9 +1669,12 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 target = filepath.Join(workdir, target)
 }
 case "required":
- required, err = strconv.ParseBool(kv[1])
- if err != nil {
- return nil, "", errInvalidSyntax
+ required = true
+ if len(kv) > 1 {
+ required, err = strconv.ParseBool(kv[1])
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
 }
 case
"mode": mode64, err := strconv.ParseUint(kv[1], 8, 32) @@ -1723,7 +1773,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr } // getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container -func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*spec.Mount, *sshagent.AgentServer, error) { +func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*specs.Mount, *sshagent.AgentServer, error) { errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") var err error @@ -1876,7 +1926,6 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint return err } } - opts := copier.RemoveOptions{ All: true, } @@ -1897,6 +1946,16 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint } } // unlock if any locked files from this RUN statement - internalParse.UnlockLockArray(artifacts.TargetLocks) + volumes.UnlockLockArray(artifacts.TargetLocks) return prevErr } + +// setPdeathsig sets a parent-death signal for the process +// the goroutine that starts the child process should lock itself to +// a native thread using runtime.LockOSThread() until the child exits +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL +} diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go index 42064a0b..9344876d 100644 --- a/vendor/github.com/containers/buildah/run_freebsd.go +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -7,10 +7,8 @@ import ( "errors" "fmt" "os" - "os/exec" "path/filepath" "strings" - "syscall" "unsafe" "github.com/containers/buildah/bind" @@ -18,12 +16,16 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" + "github.com/containers/buildah/internal/tmpdir" "github.com/containers/buildah/pkg/jail" + "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/parse" + butil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" "github.com/containers/common/libnetwork/resolvconf" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" + cutil "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" @@ -72,7 +74,7 @@ func setChildProcess() error { } func (b *Builder) Run(command []string, options RunOptions) error { - p, err := os.MkdirTemp("", Package) + p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package) if err != nil { return err } @@ -147,7 +149,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { setupTerminal(g, options.Terminal, options.TerminalSize) - configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options) + configureNetwork, networkString, err := b.configureNamespaces(g, &options) if err != nil { return err } @@ -197,15 +199,15 @@ func (b *Builder) Run(command []string, options RunOptions) error { rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} hostFile := "" - if !options.NoHosts && 
!contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { - hostFile, err = b.generateHosts(path, rootIDPair, mountPoint) + if !options.NoHosts && !cutil.StringInSlice(config.DefaultHostsFile, volumes) && options.ConfigureNetwork != define.NetworkDisabled { + hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec) if err != nil { return err } bindFiles[config.DefaultHostsFile] = hostFile } - if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { + if !cutil.StringInSlice(resolvconf.DefaultResolvConf, volumes) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, nil) if err != nil { return err @@ -282,7 +284,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { } else { moreCreateArgs = nil } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile) + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile) case IsolationChroot: err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr) default: @@ -297,7 +299,7 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene return fmt.Errorf("failed to get container config: %w", err) } // Other process resource limits - if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil { + if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits.Get()); err != nil { return err } @@ -324,13 +326,22 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, } parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { - var foundrw, foundro bool + var foundrw, foundro, foundO bool + var upperDir string for _, opt := range options { switch opt { case "rw": foundrw = true case "ro": foundro = true + case "O": + foundO = true + } + if strings.HasPrefix(opt, "upperdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + upperDir = splitOpt[1] + } } } if !foundrw && !foundro { @@ -339,6 +350,30 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, if mountType == "bind" || mountType == "rbind" { mountType = "nullfs" } + if foundO { + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return specs.Mount{}, err + } + + contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID) + if err != nil { + return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err) + } + + overlayOpts := overlay.Options{ + RootUID: idMaps.rootUID, + RootGID: idMaps.rootGID, + UpperDirOptionFragment: upperDir, + GraphOpts: b.store.GraphOptions(), + } + + overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) + if err == nil { + b.TempVolumes[contentDir] = true + } + return overlayMount, err + } return specs.Mount{ 
Destination: container, Type: mountType, @@ -376,11 +411,16 @@ func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops [ return nil } -func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, networkString string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { //if isolation == IsolationOCIRootless { //return setupRootlessNetwork(pid) //} + var configureNetworks []string + if len(networkString) > 0 { + configureNetworks = strings.Split(networkString, ",") + } + if len(configureNetworks) == 0 { configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} } @@ -415,7 +455,7 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio return teardown, nil, nil } -func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { +func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) { // Set namespace options in the container configuration. for _, namespaceOption := range namespaceOptions { switch namespaceOption.Name { @@ -423,7 +463,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti configureNetwork = false if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { - configureNetworks = strings.Split(namespaceOption.Path, ",") + networkString = namespaceOption.Path namespaceOption.Path = "" } configureNetwork = (policy != define.NetworkDisabled) @@ -439,13 +479,13 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti // equivalents for UTS and and network namespaces. 
} - return configureNetwork, configureNetworks, configureUTS, nil + return configureNetwork, networkString, configureUTS, nil } -func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) { +func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) { defaultNamespaceOptions, err := DefaultNamespaceOptions() if err != nil { - return false, nil, err + return false, "", err } namespaceOptions := defaultNamespaceOptions @@ -466,9 +506,9 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions } } - configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) + configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) if err != nil { - return false, nil, err + return false, "", err } if configureUTS { @@ -495,7 +535,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) } - return configureNetwork, configureNetworks, nil + return configureNetwork, networkString, nil } func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) { @@ -522,7 +562,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) ulimit = append(defaultUlimits, ulimit...) for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { + if ul, err = butil.ParseUlimit(u); err != nil { return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) } @@ -531,14 +571,6 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) return nil } -// setPdeathsig sets a parent-death signal for the process -func setPdeathsig(cmd *exec.Cmd) { - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL -} - // Create pipes to use for relaying stdio. 
func runMakeStdioPipe(uid, gid int) ([][]int, error) { stdioPipe := make([][]int, 3) diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 19241365..5263abec 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -7,30 +7,35 @@ import ( "context" "errors" "fmt" + "net" "os" - "os/exec" "path/filepath" - "strconv" "strings" "syscall" - "time" + "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/bind" "github.com/containers/buildah/chroot" "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" - internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/internal/tmpdir" + "github.com/containers/buildah/internal/volumes" "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/parse" + butil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/pasta" "github.com/containers/common/libnetwork/resolvconf" + "github.com/containers/common/libnetwork/slirp4netns" nettypes "github.com/containers/common/libnetwork/types" + netUtil "github.com/containers/common/libnetwork/util" "github.com/containers/common/pkg/capabilities" "github.com/containers/common/pkg/chown" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/hooks" hooksExec "github.com/containers/common/pkg/hooks/exec" + cutil "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" @@ -38,7 +43,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/docker/go-units" "github.com/opencontainers/runtime-spec/specs-go" - spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -69,7 +73,7 @@ func setChildProcess() error { // Run runs the specified command in the container's root filesystem. func (b *Builder) Run(command []string, options RunOptions) error { - p, err := os.MkdirTemp("", define.Package) + p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package) if err != nil { return err } @@ -155,7 +159,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { for _, m := range g.Mounts() { mounts[m.Destination] = true } - newMounts := []spec.Mount{} + newMounts := []specs.Mount{} for _, d := range b.Devices { // Default permission is read-only. perm := "ro" @@ -164,7 +168,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { if strings.Contains(string(d.Rule.Permissions), "w") { perm = "rw" } - devMnt := spec.Mount{ + devMnt := specs.Mount{ Destination: d.Destination, Type: parse.TypeBind, Source: d.Source, @@ -183,7 +187,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { g.Config.Mounts = append(newMounts, g.Config.Mounts...) 
} else { for _, d := range b.Devices { - sDev := spec.LinuxDevice{ + sDev := specs.LinuxDevice{ Type: string(d.Type), Path: d.Path, Major: d.Major, @@ -202,16 +206,11 @@ func (b *Builder) Run(command []string, options RunOptions) error { setupTerminal(g, options.Terminal, options.TerminalSize) - configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options) + configureNetwork, networkString, err := b.configureNamespaces(g, &options) if err != nil { return err } - // rootless and networks are not supported - if len(configureNetworks) > 0 && isolation == IsolationOCIRootless { - return errors.New("cannot use networks as rootless") - } - homeDir, err := b.configureUIDGID(g, mountPoint, options) if err != nil { return err @@ -263,27 +262,24 @@ func (b *Builder) Run(command []string, options RunOptions) error { rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} hostFile := "" - if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { - hostFile, err = b.generateHosts(path, rootIDPair, mountPoint) + if !options.NoHosts && !cutil.StringInSlice(config.DefaultHostsFile, volumes) && options.ConfigureNetwork != define.NetworkDisabled { + hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec) if err != nil { return err } bindFiles[config.DefaultHostsFile] = hostFile } - // generate /etc/hostname if the user intentionally did not override - if !(contains(volumes, "/etc/hostname")) { - if _, ok := bindFiles["/etc/hostname"]; !ok { - hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair) - if err != nil { - return err - } - // Bind /etc/hostname - bindFiles["/etc/hostname"] = hostFile + if !options.NoHostname && !(cutil.StringInSlice("/etc/hostname", volumes)) { + hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair) + if err != nil { + return err } + // Bind /etc/hostname + bindFiles["/etc/hostname"] = hostFile } - if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { + if !cutil.StringInSlice(resolvconf.DefaultResolvConf, volumes) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, spec.Linux.Namespaces) if err != nil { return err @@ -366,7 +362,7 @@ rootless=%d if options.NoPivot { moreCreateArgs = append(moreCreateArgs, "--no-pivot") } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile) case IsolationChroot: err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr) @@ -375,7 +371,7 @@ rootless=%d if options.NoPivot { moreCreateArgs = append(moreCreateArgs, "--no-pivot") } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile) 
default: err = errors.New("don't know how to run this command") @@ -383,8 +379,8 @@ rootless=%d return err } -func (b *Builder) setupOCIHooks(config *spec.Spec, hasVolumes bool) (map[string][]spec.Hook, error) { - allHooks := make(map[string][]spec.Hook) +func (b *Builder) setupOCIHooks(config *specs.Spec, hasVolumes bool) (map[string][]specs.Hook, error) { + allHooks := make(map[string][]specs.Hook) if len(b.CommonBuildOpts.OCIHooksDir) == 0 { if unshare.IsRootless() { return nil, nil @@ -420,7 +416,7 @@ func (b *Builder) setupOCIHooks(config *spec.Spec, hasVolumes bool) (map[string] } } - hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout) + hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout) //nolint:staticcheck if err != nil { logrus.Warnf("Container: precreate hook: %v", err) if hookErr != nil && hookErr != err { @@ -467,7 +463,7 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene return fmt.Errorf("failed to get container config: %w", err) } // Other process resource limits - if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil { + if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits.Get()); err != nil { return err } @@ -475,80 +471,128 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene return nil } -func setupRootlessNetwork(pid int) (teardown func(), err error) { - slirp4netns, err := exec.LookPath("slirp4netns") +func setupSlirp4netnsNetwork(config *config.Config, netns, cid string, options []string) (func(), map[string]nettypes.StatusBlock, error) { + // we need the TmpDir for the slirp4netns code + if err := os.MkdirAll(config.Engine.TmpDir, 0o751); err != nil { + return nil, nil, fmt.Errorf("failed to create tempdir: %w", err) + } + res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{ + Config: config, + ContainerID: cid, + Netns: netns, + ExtraOptions: options, + Pdeathsig: syscall.SIGKILL, + }) if err != nil { - return nil, err + return nil, nil, err } - rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe() + ip, err := slirp4netns.GetIP(res.Subnet) if err != nil { - return nil, fmt.Errorf("cannot create slirp4netns sync pipe: %w", err) + return nil, nil, fmt.Errorf("get slirp4netns ip: %w", err) + } + + // create fake status to make sure we get the correct ip in hosts + subnet := nettypes.IPNet{IPNet: net.IPNet{ + IP: *ip, + Mask: res.Subnet.Mask, + }} + netStatus := map[string]nettypes.StatusBlock{ + slirp4netns.BinaryName: { + Interfaces: map[string]nettypes.NetInterface{ + "tap0": { + Subnets: []nettypes.NetAddress{{IPNet: subnet}}, + }, + }, + }, } - defer rootlessSlirpSyncR.Close() - // Be sure there are no fds inherited to slirp4netns except the sync pipe - files, err := os.ReadDir("/proc/self/fd") + return func() { + syscall.Kill(res.Pid, syscall.SIGKILL) // nolint:errcheck + var status syscall.WaitStatus + syscall.Wait4(res.Pid, &status, 0, nil) // nolint:errcheck + }, netStatus, nil +} + +func setupPasta(config *config.Config, netns string, options []string) (func(), map[string]nettypes.StatusBlock, error) { + err := pasta.Setup(&pasta.SetupOptions{ + Config: config, + Netns: netns, + ExtraOptions: options, + }) if err != nil { - return nil, fmt.Errorf("cannot list open fds: %w", err) + return nil, nil, err } - for _, f := range files { - fd, err 
:= strconv.Atoi(f.Name()) - if err != nil { - return nil, fmt.Errorf("cannot parse fd: %w", err) - } - if fd == int(rootlessSlirpSyncW.Fd()) { - continue - } - unix.CloseOnExec(fd) + + var ip string + err = ns.WithNetNSPath(netns, func(_ ns.NetNS) error { + // get the first ip in the netns and use this as our ip for /etc/hosts + ip = netUtil.GetLocalIP() + return nil + }) + if err != nil { + return nil, nil, err } - cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0") - setPdeathsig(cmd) - cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil - cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} + // create fake status to make sure we get the correct ip in hosts + subnet := nettypes.IPNet{IPNet: net.IPNet{ + IP: net.ParseIP(ip), + Mask: net.IPv4Mask(255, 255, 255, 0), + }} + netStatus := map[string]nettypes.StatusBlock{ + slirp4netns.BinaryName: { + Interfaces: map[string]nettypes.NetInterface{ + "tap0": { + Subnets: []nettypes.NetAddress{{IPNet: subnet}}, + }, + }, + }, + } - err = cmd.Start() - rootlessSlirpSyncW.Close() + return nil, netStatus, nil +} + +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, network, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { + netns := fmt.Sprintf("/proc/%d/ns/net", pid) + var configureNetworks []string + defConfig, err := config.Default() if err != nil { - return nil, fmt.Errorf("cannot start slirp4netns: %w", err) + return nil, nil, fmt.Errorf("failed to get container config: %w", err) } - b := make([]byte, 1) - for { - if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil { - return nil, fmt.Errorf("setting slirp4netns pipe timeout: %w", err) - } - if _, err := rootlessSlirpSyncR.Read(b); err == nil { - break - } else { - if os.IsTimeout(err) { - // Check if the process is still running. - var status syscall.WaitStatus - _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil) - if err != nil { - return nil, fmt.Errorf("failed to read slirp4netns process status: %w", err) - } - if status.Exited() || status.Signaled() { - return nil, errors.New("slirp4netns failed") - } - - continue - } - return nil, fmt.Errorf("failed to read from slirp4netns sync pipe: %w", err) + name, networkOpts, hasOpts := strings.Cut(network, ":") + var netOpts []string + if hasOpts { + netOpts = strings.Split(networkOpts, ",") + } + if isolation == IsolationOCIRootless && name == "" { + switch defConfig.Network.DefaultRootlessNetworkCmd { + case slirp4netns.BinaryName, "": + name = slirp4netns.BinaryName + case pasta.BinaryName: + name = pasta.BinaryName + default: + return nil, nil, fmt.Errorf("invalid default_rootless_network_cmd option %q", + defConfig.Network.DefaultRootlessNetworkCmd) } } - return func() { - cmd.Process.Kill() // nolint:errcheck - cmd.Wait() // nolint:errcheck - }, nil -} + switch { + case name == slirp4netns.BinaryName: + return setupSlirp4netnsNetwork(defConfig, netns, containerName, netOpts) + case name == pasta.BinaryName: + return setupPasta(defConfig, netns, netOpts) + + // Basically default case except we make sure to not split an empty + // name as this would return a slice with one empty string which is + // not a valid network name. 
+ case len(network) > 0: + // old syntax allow comma separated network names + configureNetworks = strings.Split(network, ",") + } -func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { if isolation == IsolationOCIRootless { - teardown, err = setupRootlessNetwork(pid) - return teardown, nil, err + return nil, nil, errors.New("cannot use networks as rootless") } if len(configureNetworks) == 0 { @@ -560,7 +604,6 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio // interfaces. Ensure this by opening a handle to the network // namespace, and using our copy to both configure and // deconfigure it. - netns := fmt.Sprintf("/proc/%d/ns/net", pid) netFD, err := unix.Open(netns, unix.O_RDONLY, 0) if err != nil { return nil, nil, fmt.Errorf("opening network namespace: %w", err) @@ -615,10 +658,10 @@ func runMakeStdioPipe(uid, gid int) ([][]int, error) { return stdioPipe, nil } -func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { +func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) { defaultContainerConfig, err := config.Default() if err != nil { - return false, nil, false, fmt.Errorf("failed to get container config: %w", err) + return false, "", false, fmt.Errorf("failed to get container config: %w", err) } addSysctl := func(prefixes []string) error { @@ -644,7 +687,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti case string(specs.IPCNamespace): if !namespaceOption.Host { if err := addSysctl([]string{"fs.mqueue"}); err != nil { - return false, nil, false, err + return false, "", false, err } } case string(specs.UserNamespace): @@ -657,7 +700,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti configureNetwork = false if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { - configureNetworks = strings.Split(namespaceOption.Path, ",") + networkString = namespaceOption.Path namespaceOption.Path = "" } configureNetwork = (policy != define.NetworkDisabled) @@ -669,30 +712,30 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti configureUTS = true } if err := addSysctl([]string{"kernel.hostname", "kernel.domainame"}); err != nil { - return false, nil, false, err + return false, "", false, err } } } if namespaceOption.Host { if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { - return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err) + return false, "", false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err) } } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { if namespaceOption.Path == "" { - return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err) + return false, "", false, 
fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err) } - return false, nil, false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err) + return false, "", false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err) } } // If we've got mappings, we're going to have to create a user namespace. if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns { if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil { - return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err) + return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err) } hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") if err != nil { - return false, nil, false, err + return false, "", false, err } for _, m := range idmapOptions.UIDMap { g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) @@ -712,46 +755,32 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti } if !specifiedNetwork { if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil { - return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err) + return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err) } configureNetwork = (policy != define.NetworkDisabled) } } else { if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil { - return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err) + return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err) } if !specifiedNetwork { if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil { - return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err) + return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err) } } } if configureNetwork { if err := addSysctl([]string{"net"}); err != nil { - return false, nil, false, err - } - for name, val := range define.DefaultNetworkSysctl { - // Check that the sysctl we are adding is actually supported - // by the kernel - p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1)) - _, err := os.Stat(p) - if err != nil && !errors.Is(err, os.ErrNotExist) { - return false, nil, false, err - } - if err == nil { - g.AddLinuxSysctl(name, val) - } else { - logger.Warnf("ignoring sysctl %s since %s doesn't exist", name, p) - } + return false, "", false, err } } - return configureNetwork, configureNetworks, configureUTS, nil + return configureNetwork, networkString, configureUTS, nil } -func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) { +func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) { defaultNamespaceOptions, err := DefaultNamespaceOptions() if err != nil { - return false, nil, err + return false, "", err } namespaceOptions := defaultNamespaceOptions @@ -774,9 +803,9 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions if networkPolicy == NetworkDisabled { namespaceOptions.AddOrReplace(define.NamespaceOptions{{Name: string(specs.NetworkNamespace), Host: false}}...) 
} - configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) + configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) if err != nil { - return false, nil, err + return false, "", err } if configureUTS { @@ -803,7 +832,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) } - return configureNetwork, configureNetworks, nil + return configureNetwork, networkString, nil } func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) { @@ -830,7 +859,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) ulimit = append(defaultUlimits, ulimit...) for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { + if ul, err = butil.ParseUlimit(u); err != nil { return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) } @@ -980,32 +1009,13 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, } func setupMaskedPaths(g *generate.Generator) { - for _, mp := range []string{ - "/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - "/sys/fs/selinux", - "/sys/dev", - } { + for _, mp := range config.DefaultMaskedPaths { g.AddLinuxMaskedPaths(mp) } } func setupReadOnlyPaths(g *generate.Generator) { - for _, rp := range []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - } { + for _, rp := range config.DefaultReadOnlyPaths { g.AddLinuxReadonlyPaths(rp) } } @@ -1069,7 +1079,7 @@ func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops [ return setupCapDrop(g, drops...) } -func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []spec.Mount { +func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []specs.Mount { for i := range mounts { if mounts[i].Destination == mount.Destination { mounts[i] = mount @@ -1082,7 +1092,7 @@ func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []spec.Mount { // setupSpecialMountSpecChanges creates special mounts for depending on the namespaces // logic taken from podman and adapted for buildah // https://github.com/containers/podman/blob/4ba71f955a944790edda6e007e6d074009d437a7/pkg/specgen/generate/oci.go#L178 -func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) { +func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mount, error) { mounts := spec.Mounts isRootless := unshare.IsRootless() isNewUserns := false @@ -1198,7 +1208,7 @@ func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Moun return mounts, nil } -func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool { +func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool { for _, r := range ids { if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size { return true @@ -1208,9 +1218,9 @@ func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool { } // If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??). 
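The comment above pins down the locking contract for getCacheMount: when the call succeeds and returns a non-nil *lockfile.LockFile, the caller is responsible for releasing it once the cache mount is no longer in use. One plausible caller sketch under that contract (useMount is a hypothetical consumer; the other identifiers come from the hunk below):

    mnt, lock, err := b.getCacheMount(tokens, stageMountPoints, idMaps, workDir)
    if err != nil {
    	return err
    }
    if lock != nil {
    	// Release the target lock once the cache mount has been consumed.
    	defer lock.Unlock()
    }
    useMount(mnt) // hypothetical consumer of the returned spec mount
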
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*spec.Mount, *lockfile.LockFile, error) { +func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) { var optionMounts []specs.Mount - mount, targetLock, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir) + mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir) if err != nil { return nil, nil, err } @@ -1228,11 +1238,3 @@ func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]int succeeded = true return &volumes[0], targetLock, nil } - -// setPdeathsig sets a parent-death signal for the process -func setPdeathsig(cmd *exec.Cmd) { - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL -} diff --git a/vendor/github.com/containers/buildah/selinux_tag.sh b/vendor/github.com/containers/buildah/selinux_tag.sh deleted file mode 100644 index 993630ad..00000000 --- a/vendor/github.com/containers/buildah/selinux_tag.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -if pkg-config libselinux 2> /dev/null ; then - echo selinux -fi diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go index 12546dbd..91c9ace1 100644 --- a/vendor/github.com/containers/buildah/util/types.go +++ b/vendor/github.com/containers/buildah/util/types.go @@ -10,11 +10,11 @@ const ( ) var ( - // DefaultCapabilities is the list of capabilities which we grant by - // default to containers which are running under UID 0. - DefaultCapabilities = define.DefaultCapabilities + // Deprecated: DefaultCapabilities values should be retrieved from + // github.com/containers/common/pkg/config + DefaultCapabilities = define.DefaultCapabilities //nolint - // DefaultNetworkSysctl is the list of Kernel parameters which we - // grant by default to containers which are running under UID 0. 
- DefaultNetworkSysctl = define.DefaultNetworkSysctl + // Deprecated: DefaultNetworkSysctl values should be retrieved from + // github.com/containers/common/pkg/config + DefaultNetworkSysctl = define.DefaultNetworkSysctl //nolint ) diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go index 8048e26a..85fc6dd7 100644 --- a/vendor/github.com/containers/buildah/util/util_unix.go +++ b/vendor/github.com/containers/buildah/util/util_unix.go @@ -5,32 +5,9 @@ package util import ( "os" - "sync" "syscall" ) -type hardlinkDeviceAndInode struct { - device, inode uint64 -} - -type HardlinkChecker struct { - hardlinks sync.Map -} - -func (h *HardlinkChecker) Check(fi os.FileInfo) string { - if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { - if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" { - return name.(string) - } - } - return "" -} -func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { - if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { - h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name) - } -} - func UID(st os.FileInfo) int { return int(st.Sys().(*syscall.Stat_t).Uid) } diff --git a/vendor/github.com/containers/buildah/util/util_windows.go b/vendor/github.com/containers/buildah/util/util_windows.go index 18965ab1..d11e894e 100644 --- a/vendor/github.com/containers/buildah/util/util_windows.go +++ b/vendor/github.com/containers/buildah/util/util_windows.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package util @@ -6,15 +7,6 @@ import ( "os" ) -type HardlinkChecker struct { -} - -func (h *HardlinkChecker) Check(fi os.FileInfo) string { - return "" -} -func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { -} - func UID(st os.FileInfo) int { return 0 } diff --git a/vendor/github.com/containers/common/internal/attributedstring/slice.go b/vendor/github.com/containers/common/internal/attributedstring/slice.go new file mode 100644 index 00000000..ad4acc5e --- /dev/null +++ b/vendor/github.com/containers/common/internal/attributedstring/slice.go @@ -0,0 +1,102 @@ +package attributedstring + +import ( + "bytes" + "fmt" + + "github.com/BurntSushi/toml" +) + +// Slice allows for extending a TOML string array with custom +// attributes that control how the array is marshaled into a Go string. +// +// Specifically, an Slice can be configured to avoid it being +// overridden by a subsequent unmarshal sequence. When the `append` attribute +// is specified, the array will be appended instead (e.g., `array=["9", +// {append=true}]`). +type Slice struct { // A "mixed-type array" in TOML. + // Note that the fields below _must_ be exported. Otherwise the TOML + // encoder would fail during type reflection. + Values []string + Attributes struct { // Using a struct allows for adding more attributes in the future. + Append *bool // Nil if not set by the user + } +} + +// NewSlice creates a new slice with the specified values. +func NewSlice(values []string) Slice { + return Slice{Values: values} +} + +// Get returns the Slice values or an empty string slice. +func (a *Slice) Get() []string { + if a.Values == nil { + return []string{} + } + return a.Values +} + +// Set overrides the values of the Slice. +func (a *Slice) Set(values []string) { + a.Values = values +} + +// UnmarshalTOML is the custom unmarshal method for Slice. 
+func (a *Slice) UnmarshalTOML(data interface{}) error { + iFaceSlice, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("unable to cast to interface array: %v", data) + } + + var loadedStrings []string + for _, x := range iFaceSlice { // Iterate over each item in the slice. + switch val := x.(type) { + case string: // Strings are directly appended to the slice. + loadedStrings = append(loadedStrings, val) + case map[string]interface{}: // The attribute struct is represented as a map. + for k, v := range val { // Iterate over all _supported_ keys. + switch k { + case "append": + boolVal, ok := v.(bool) + if !ok { + return fmt.Errorf("unable to cast append to bool: %v", k) + } + a.Attributes.Append = &boolVal + default: // Unsupported map key. + return fmt.Errorf("unsupported key %q in map: %v", k, val) + } + } + default: // Unsupported item. + return fmt.Errorf("unsupported item in attributed string slice: %v", x) + } + } + + if a.Attributes.Append != nil && *a.Attributes.Append { // If _explicitly_ configured, append the loaded slice. + a.Values = append(a.Values, loadedStrings...) + } else { // Default: override the existing Slice. + a.Values = loadedStrings + } + return nil +} + +// MarshalTOML is the custom marshal method for Slice. +func (a *Slice) MarshalTOML() ([]byte, error) { + iFaceSlice := make([]interface{}, 0, len(a.Values)) + + for _, x := range a.Values { + iFaceSlice = append(iFaceSlice, x) + } + + if a.Attributes.Append != nil { + Attributes := make(map[string]any) + Attributes["append"] = *a.Attributes.Append + iFaceSlice = append(iFaceSlice, Attributes) + } + + buf := new(bytes.Buffer) + enc := toml.NewEncoder(buf) + if err := enc.Encode(iFaceSlice); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 7a6f1f1b..d6acc732 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -11,6 +14,7 @@ import ( "time" "github.com/containers/common/libimage/manifests" + "github.com/containers/common/libimage/platform" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/retry" "github.com/containers/image/v5/copy" @@ -72,7 +76,7 @@ type CopyOptions struct { // Default 3. MaxRetries *uint // RetryDelay used for the exponential back off of MaxRetries. - // Default 1 time.Scond. + // Default 1 time.Second. RetryDelay *time.Duration // ManifestMIMEType is the desired media type the image will be // converted to if needed. 
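The Slice type above is driven through the standard BurntSushi decoder via the UnmarshalTOML hook. A small sketch of the append semantics (demoConfig and its env key are invented for illustration; attributedstring is an internal package, so this only compiles from inside containers/common):

    type demoConfig struct {
    	Env attributedstring.Slice `toml:"env"`
    }

    var cfg demoConfig
    // The base file sets the slice outright.
    _, _ = toml.Decode(`env = ["A=1"]`, &cfg) // cfg.Env.Get() == ["A=1"]
    // A later file with {append=true} extends rather than overrides.
    _, _ = toml.Decode(`env = ["B=2", {append=true}]`, &cfg) // ["A=1", "B=2"]
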
Note that it must contain the exact MIME @@ -239,7 +243,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags - c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = platform.Normalize(options.OS, options.Architecture, options.Variant) if options.SignaturePolicyPath != "" { c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath @@ -360,7 +364,11 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere defer cancel() defer timer.Stop() - fmt.Fprintf(c.imageCopyOptions.ReportWriter, "Pulling image %s inside systemd: setting pull timeout to %s\n", source.DockerReference(), time.Duration(numExtensions)*extension) + fmt.Fprintf(c.imageCopyOptions.ReportWriter, + "Pulling image %s inside systemd: setting pull timeout to %s\n", + source.StringWithinTransport(), + time.Duration(numExtensions)*extension, + ) // From `man systemd.service(5)`: // diff --git a/vendor/github.com/containers/common/libimage/define/platform.go b/vendor/github.com/containers/common/libimage/define/platform.go new file mode 100644 index 00000000..7e13abff --- /dev/null +++ b/vendor/github.com/containers/common/libimage/define/platform.go @@ -0,0 +1,11 @@ +package define + +// PlatformPolicy controls the behavior of image-platform matching. +type PlatformPolicy int + +const ( + // Only debug log if an image does not match the expected platform. + PlatformPolicyDefault PlatformPolicy = iota + // Warn if an image does not match the expected platform. 
+ PlatformPolicyWarn +) diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go index f27edc03..765b0df8 100644 --- a/vendor/github.com/containers/common/libimage/disk_usage.go +++ b/vendor/github.com/containers/common/libimage/disk_usage.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go index c7733564..5d82efa6 100644 --- a/vendor/github.com/containers/common/libimage/events.go +++ b/vendor/github.com/containers/common/libimage/events.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index fdde5e83..b51853af 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -11,7 +14,6 @@ import ( filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" "github.com/containers/image/v5/docker/reference" - "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -290,7 +292,7 @@ func filterReferences(r *Runtime, value string) filterFunc { refString := ref.String() // FQN with tag/digest candidates := []string{refString} - // Split the reference into 3 components (twice if diggested/tagged): + // Split the reference into 3 components (twice if digested/tagged): // 1) Fully-qualified reference // 2) Without domain // 3) Without domain and path @@ -394,18 +396,17 @@ func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc // filterID creates an image-ID filter for matching the specified value. func filterID(value string) filterFunc { return func(img *Image) (bool, error) { - return img.ID() == value, nil + return strings.HasPrefix(img.ID(), value), nil } } // filterDigest creates a digest filter for matching the specified value. func filterDigest(value string) (filterFunc, error) { - d, err := digest.Parse(value) - if err != nil { - return nil, fmt.Errorf("invalid value %q for digest filter: %w", value, err) + if !strings.HasPrefix(value, "sha256:") { + return nil, fmt.Errorf("invalid value %q for digest filter", value) } return func(img *Image) (bool, error) { - return img.hasDigest(d), nil + return img.containsDigestPrefix(value), nil }, nil } diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index ad989b52..ccd81096 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -1,10 +1,12 @@ +//go:build !remote +// +build !remote + package libimage import ( "context" + "fmt" "time" - - "github.com/containers/storage" ) // ImageHistory contains the history information of an image. 
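The filters.go hunk above relaxes both filters from exact to prefix matching: filterID now accepts a truncated image ID, and filterDigest only validates a "sha256:" prefix before matching by digest prefix (via the containsDigestPrefix helper added to image.go further down). The behavioral change, with a made-up ID:

    fullID := "f3a1b2c4deadbeef" // hypothetical image ID
    // Old behavior: the filter value had to equal the full ID.
    // New behavior: a prefix is enough.
    matches := strings.HasPrefix(fullID, "f3a") // true
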
@@ -29,17 +31,19 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { return nil, err } - var allHistory []ImageHistory - var layer *storage.Layer + var nextNode *layerNode if i.TopLayer() != "" { - layer, err = i.runtime.store.Layer(i.TopLayer()) + layer, err := i.runtime.store.Layer(i.TopLayer()) if err != nil { return nil, err } + nextNode = layerTree.node(layer.ID) } // Iterate in reverse order over the history entries, and lookup the - // corresponding image ID, size and get the next later if needed. + // corresponding image ID, size. If it's a non-empty history entry, + // pick the next "storage" layer by walking the layer tree. + var allHistory []ImageHistory numHistories := len(ociImage.History) - 1 usedIDs := make(map[string]bool) // prevents assigning images IDs more than once for x := numHistories; x >= 0; x-- { @@ -50,31 +54,25 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { Comment: ociImage.History[x].Comment, } - if layer != nil { - if !ociImage.History[x].EmptyLayer { - history.Size = layer.UncompressedSize + if nextNode != nil && len(nextNode.images) > 0 { + id := nextNode.images[0].ID() // always use the first one + if _, used := usedIDs[id]; !used { + history.ID = id + usedIDs[id] = true } - // Query the layer tree if it's the top layer of an - // image. - node := layerTree.node(layer.ID) - if len(node.images) > 0 { - id := node.images[0].ID() // always use the first one - if _, used := usedIDs[id]; !used { - history.ID = id - usedIDs[id] = true - } - for i := range node.images { - history.Tags = append(history.Tags, node.images[i].Names()...) - } + for i := range nextNode.images { + history.Tags = append(history.Tags, nextNode.images[i].Names()...) } - if layer.Parent == "" { - layer = nil - } else if !ociImage.History[x].EmptyLayer { - layer, err = i.runtime.store.Layer(layer.Parent) - if err != nil { - return nil, err - } + } + + if !ociImage.History[x].EmptyLayer { + if nextNode == nil { // If no layer's left, something's wrong. + return nil, fmt.Errorf("no layer left for non-empty history entry: %v", history) } + + history.Size = nextNode.layer.UncompressedSize + + nextNode = nextNode.parent } allHistory = append(allHistory, history) diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 640968fd..4d106d42 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -9,6 +12,8 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" + "github.com/containers/common/libimage/platform" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" storageTransport "github.com/containers/image/v5/storage" @@ -169,6 +174,17 @@ func (i *Image) hasDigest(wantedDigest digest.Digest) bool { return false } +// containsDigestPrefix returns whether the specified value matches any digest of the +// image. It checks for the prefix and not a full match. +func (i *Image) containsDigestPrefix(wantedDigestPrefix string) bool { + for _, d := range i.Digests() { + if strings.HasPrefix(d.String(), wantedDigestPrefix) { + return true + } + } + return false +} + // IsReadOnly returns whether the image is set read only. 
func (i *Image) IsReadOnly() bool { return i.storageImage.ReadOnly @@ -567,8 +583,7 @@ func (i *Image) Tag(name string) error { defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageTag}) } - newNames := append(i.Names(), ref.String()) - if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + if err := i.runtime.store.AddNames(i.ID(), []string{ref.String()}); err != nil { return err } @@ -592,7 +607,7 @@ func (i *Image) Untag(name string) error { ref, err := NormalizeName(name) if err != nil { - return fmt.Errorf("normalizing name %q: %w", name, err) + return err } // FIXME: this is breaking Podman CI but must be re-enabled once @@ -607,26 +622,25 @@ func (i *Image) Untag(name string) error { name = ref.String() - logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID()) - if i.runtime.eventChannel != nil { - defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) - } - - removedName := false - newNames := []string{} + foundName := false for _, n := range i.Names() { if n == name { - removedName = true - continue + foundName = true + break } - newNames = append(newNames, n) } - - if !removedName { + // Return an error if the name is not found, the c/storage + // RemoveNames() API does not create one if no match is found. + if !foundName { return fmt.Errorf("%s: %w", name, errTagUnknown) } - if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID()) + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) + } + + if err := i.runtime.store.RemoveNames(i.ID(), []string{name}); err != nil { return err } @@ -1010,3 +1024,35 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System } return "@" + imageDigest.Encoded(), nil } + +// Checks whether the image matches the specified platform. +// Returns +// - 1) a matching error that can be used for logging (or returning) what does not match +// - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) +// - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) 
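The matchesPlatform body below leans on the containerd platforms matcher. A condensed sketch of that comparison, with hypothetical platform strings and error handling elided:

    expected, _ := platforms.Parse("linux/arm64")  // hypothetical expected platform
    fromImage, _ := platforms.Parse("linux/amd64") // hypothetical image platform
    if !platforms.NewMatcher(expected).Match(fromImage) {
    	// Mismatch: reported through the first (non-fatal) return value.
    }
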
+func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { + if err := i.isCorrupted(""); err != nil { + return err, false, nil + } + inspectInfo, err := i.inspectInfo(ctx) + if err != nil { + return nil, false, fmt.Errorf("inspecting image: %w", err) + } + + customPlatform := len(os)+len(arch)+len(variant) != 0 + + expected, err := platforms.Parse(platform.ToString(os, arch, variant)) + if err != nil { + return nil, false, fmt.Errorf("parsing host platform: %v", err) + } + fromImage, err := platforms.Parse(platform.ToString(inspectInfo.Os, inspectInfo.Architecture, inspectInfo.Variant)) + if err != nil { + return nil, false, fmt.Errorf("parsing image platform: %v", err) + } + + if platforms.NewMatcher(expected).Match(fromImage) { + return nil, customPlatform, nil + } + + return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", platforms.Format(fromImage), platforms.Format(expected)), customPlatform, nil +} diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 69b7debd..9f5841fe 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go index 9c958ce6..8143d377 100644 --- a/vendor/github.com/containers/common/libimage/image_tree.go +++ b/vendor/github.com/containers/common/libimage/image_tree.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index b276c365..5519f02b 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index c6632d9a..1003b648 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index e6b012f9..71eafb0e 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -199,6 +202,17 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I if parent.TopLayer() == "" { for i := range t.emptyImages { empty := t.emptyImages[i] + isManifest, err := empty.IsManifestList(ctx) + if err != nil { + return nil, err + } + if isManifest { + // If this is a manifest list and is already + // marked as empty then no instance can be + // selected from this list therefore its + // better to skip this. 
+ continue + } isParent, err := checkParent(empty) if err != nil { return nil, err @@ -289,6 +303,17 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { if childID == empty.ID() { continue } + isManifest, err := empty.IsManifestList(ctx) + if err != nil { + return nil, err + } + if isManifest { + // If this is a manifest list and is already + // marked as empty then no instance can be + // selected from this list therefore its + // better to skip this. + continue + } emptyOCI, err := t.toOCI(ctx, empty) if err != nil { if ErrorIsImageUnknown(err) { diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 72c56f12..36283a99 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index 83989236..c36bfda9 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -440,6 +443,8 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, RemoveSignatures: options.RemoveSignatures, ManifestType: options.ManifestMIMEType, + MaxRetries: options.MaxRetries, + RetryDelay: options.RetryDelay, ForceCompressionFormat: options.ForceCompressionFormat, } diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 3c066e04..d28ac87b 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -4,11 +4,12 @@ import ( "context" "encoding/json" "errors" - stderrors "errors" "fmt" "io" + "time" "github.com/containers/common/pkg/manifests" + "github.com/containers/common/pkg/retry" "github.com/containers/common/pkg/supplemented" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker/reference" @@ -28,6 +29,10 @@ import ( "github.com/sirupsen/logrus" ) +const ( + defaultMaxRetries = 3 +) + const instancesData = "instances.json" // LookupReferenceFunc return an image reference based on the specified one. @@ -38,7 +43,7 @@ type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, e // ErrListImageUnknown is returned when we attempt to create an image reference // for a List that has not yet been saved to an image. -var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list") +var ErrListImageUnknown = errors.New("unable to determine which image holds the manifest list") type list struct { manifests.List @@ -73,6 +78,11 @@ type PushOptions struct { SourceFilter LookupReferenceFunc // filter the list source AddCompression []string // add existing instances with requested compression algorithms to manifest list ForceCompressionFormat bool // force push with requested compression ignoring the blobs which can be reused. + // Maximum number of retries with exponential backoff when facing + // transient network errors. Default 3. 
+ MaxRetries *uint + // RetryDelay used for the exponential back off of MaxRetries. + RetryDelay *time.Duration } // Create creates a new list containing information about the specified image, @@ -263,16 +273,31 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push ForceCompressionFormat: options.ForceCompressionFormat, } - // Copy whatever we were asked to copy. - manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) - if err != nil { - return nil, "", err + retryOptions := retry.Options{} + retryOptions.MaxRetry = defaultMaxRetries + if options.MaxRetries != nil { + retryOptions.MaxRetry = int(*options.MaxRetries) } - manifestDigest, err := manifest.Digest(manifestBytes) - if err != nil { - return nil, "", err + if options.RetryDelay != nil { + retryOptions.Delay = *options.RetryDelay + } + + // Copy whatever we were asked to copy. + var manifestDigest digest.Digest + f := func() error { + opts := copyOptions + var manifestBytes []byte + var digest digest.Digest + var err error + if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, opts); err == nil { + if digest, err = manifest.Digest(manifestBytes); err == nil { + manifestDigest = digest + } + } + return err } - return nil, manifestDigest, nil + err = retry.IfNecessary(ctx, f, &retryOptions) + return nil, manifestDigest, err } func prepareAddWithCompression(variants []string) ([]cp.OptionCompressionVariant, error) { diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index 9619b1a0..2b340286 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go index b88d6613..fcbd10ad 100644 --- a/vendor/github.com/containers/common/libimage/oci.go +++ b/vendor/github.com/containers/common/libimage/oci.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go index 06c15ee6..c378bc27 100644 --- a/vendor/github.com/containers/common/libimage/platform.go +++ b/vendor/github.com/containers/common/libimage/platform.go @@ -1,100 +1,29 @@ +//go:build !remote +// +build !remote + package libimage import ( - "context" - "fmt" - "runtime" - - "github.com/containerd/containerd/platforms" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" + "github.com/containers/common/libimage/define" + "github.com/containers/common/libimage/platform" ) // PlatformPolicy controls the behavior of image-platform matching. -type PlatformPolicy int +// Deprecated: new code should use define.PlatformPolicy directly. +type PlatformPolicy = define.PlatformPolicy const ( // Only debug log if an image does not match the expected platform. - PlatformPolicyDefault PlatformPolicy = iota + // Deprecated: new code should reference define.PlatformPolicyDefault directly. + PlatformPolicyDefault = define.PlatformPolicyDefault // Warn if an image does not match the expected platform. - PlatformPolicyWarn + // Deprecated: new code should reference define.PlatformPolicyWarn directly. 
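With the PushOptions fields added above, callers can bound retries on transient network errors during a manifest-list push. A hypothetical configuration (both fields are optional pointers; leaving them unset falls back to defaultMaxRetries, i.e. 3, and the retry package's default delay):

    maxRetries := uint(5)
    delay := 2 * time.Second
    opts := manifests.PushOptions{
    	MaxRetries: &maxRetries, // overrides defaultMaxRetries (3)
    	RetryDelay: &delay,      // base delay for the exponential backoff
    }
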
+ PlatformPolicyWarn = define.PlatformPolicyWarn ) // NormalizePlatform normalizes (according to the OCI spec) the specified os, -// arch and variant. If left empty, the individual item will be normalized. +// arch and variant. If left empty, the individual item will be normalized. +// Deprecated: new code should call libimage/platform.Normalize() instead. func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { - platformSpec := v1.Platform{ - OS: rawOS, - Architecture: rawArch, - Variant: rawVariant, - } - normalizedSpec := platforms.Normalize(platformSpec) - if normalizedSpec.Variant == "" && rawVariant != "" { - normalizedSpec.Variant = rawVariant - } - rawPlatform := toPlatformString(normalizedSpec.OS, normalizedSpec.Architecture, normalizedSpec.Variant) - normalizedPlatform, err := platforms.Parse(rawPlatform) - if err != nil { - logrus.Debugf("Error normalizing platform: %v", err) - return rawOS, rawArch, rawVariant - } - logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) - os = rawOS - if rawOS != "" { - os = normalizedPlatform.OS - } - arch = rawArch - if rawArch != "" { - arch = normalizedPlatform.Architecture - } - variant = rawVariant - if rawVariant != "" || (rawVariant == "" && normalizedPlatform.Variant != "") { - variant = normalizedPlatform.Variant - } - return os, arch, variant -} - -func toPlatformString(os, arch, variant string) string { - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - if variant == "" { - return fmt.Sprintf("%s/%s", os, arch) - } - return fmt.Sprintf("%s/%s/%s", os, arch, variant) -} - -// Checks whether the image matches the specified platform. -// Returns -// - 1) a matching error that can be used for logging (or returning) what does not match -// - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) -// - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) 
-func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { - if err := i.isCorrupted(""); err != nil { - return err, false, nil - } - inspectInfo, err := i.inspectInfo(ctx) - if err != nil { - return nil, false, fmt.Errorf("inspecting image: %w", err) - } - - customPlatform := len(os)+len(arch)+len(variant) != 0 - - expected, err := platforms.Parse(toPlatformString(os, arch, variant)) - if err != nil { - return nil, false, fmt.Errorf("parsing host platform: %v", err) - } - fromImage, err := platforms.Parse(toPlatformString(inspectInfo.Os, inspectInfo.Architecture, inspectInfo.Variant)) - if err != nil { - return nil, false, fmt.Errorf("parsing image platform: %v", err) - } - - if platforms.NewMatcher(expected).Match(fromImage) { - return nil, customPlatform, nil - } - - return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", platforms.Format(fromImage), platforms.Format(expected)), customPlatform, nil + return platform.Normalize(rawOS, rawArch, rawVariant) } diff --git a/vendor/github.com/containers/common/libimage/platform/platform.go b/vendor/github.com/containers/common/libimage/platform/platform.go new file mode 100644 index 00000000..2272cee7 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/platform/platform.go @@ -0,0 +1,57 @@ +package platform + +import ( + "fmt" + "runtime" + + "github.com/containerd/containerd/platforms" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// Normalize normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will be normalized. +func Normalize(rawOS, rawArch, rawVariant string) (os, arch, variant string) { + platformSpec := v1.Platform{ + OS: rawOS, + Architecture: rawArch, + Variant: rawVariant, + } + normalizedSpec := platforms.Normalize(platformSpec) + if normalizedSpec.Variant == "" && rawVariant != "" { + normalizedSpec.Variant = rawVariant + } + rawPlatform := ToString(normalizedSpec.OS, normalizedSpec.Architecture, normalizedSpec.Variant) + normalizedPlatform, err := platforms.Parse(rawPlatform) + if err != nil { + logrus.Debugf("Error normalizing platform: %v", err) + return rawOS, rawArch, rawVariant + } + logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) + os = rawOS + if rawOS != "" { + os = normalizedPlatform.OS + } + arch = rawArch + if rawArch != "" { + arch = normalizedPlatform.Architecture + } + variant = rawVariant + if rawVariant != "" || (rawVariant == "" && normalizedPlatform.Variant != "") { + variant = normalizedPlatform.Variant + } + return os, arch, variant +} + +func ToString(os, arch, variant string) string { + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + if variant == "" { + return fmt.Sprintf("%s/%s", os, arch) + } + return fmt.Sprintf("%s/%s/%s", os, arch, variant) +} diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 579eae2c..bc8e8498 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go index 7203838a..ed1d90c1 100644 --- a/vendor/github.com/containers/common/libimage/push.go +++ 
b/vendor/github.com/containers/common/libimage/push.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 6d90272c..1948fe0a 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -7,6 +10,8 @@ import ( "os" "strings" + "github.com/containers/common/libimage/define" + "github.com/containers/common/libimage/platform" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" @@ -184,7 +189,7 @@ type LookupImageOptions struct { Variant string // Controls the behavior when checking the platform of an image. - PlatformPolicy PlatformPolicy + PlatformPolicy define.PlatformPolicy // If set, do not look for items/instances in the manifest list that // match the current platform but return the manifest list as is. @@ -283,7 +288,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, options.Variant = r.systemContext.VariantChoice } // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). - options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) + options.OS, options.Architecture, options.Variant = platform.Normalize(options.OS, options.Architecture, options.Variant) // Second, try out the candidates as resolved by shortnames. This takes // "localhost/" prefixed images into account as well. @@ -435,9 +440,9 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandida return nil, nil } switch options.PlatformPolicy { - case PlatformPolicyDefault: + case define.PlatformPolicyDefault: logrus.Debugf("%v", matchError) - case PlatformPolicyWarn: + case define.PlatformPolicyWarn: logrus.Warnf("%v", matchError) } } diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index a42bbb49..47a3a566 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index 618850e6..9ef0e832 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go index 8180b49b..49d20b91 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/network.go +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -9,6 +9,7 @@ import ( "encoding/hex" "errors" "fmt" + "io/fs" "os" "path/filepath" "strings" @@ -17,7 +18,7 @@ import ( "github.com/containernetworking/cni/libcni" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" + "github.com/containers/common/pkg/version" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" 
"github.com/sirupsen/logrus" @@ -201,7 +202,10 @@ func (n *cniNetwork) loadNetworks() error { net, err := createNetworkFromCNIConfigList(conf, file) if err != nil { - logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err) + // ignore ENOENT as the config has been removed in the meantime so we can just ignore this case + if !errors.Is(err, fs.ErrNotExist) { + logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err) + } continue } logrus.Debugf("Successfully loaded network %s: %v", net.Name, net) @@ -302,8 +306,8 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo { path := "" packageVersion := "" for _, p := range n.cniPluginDirs { - ver := cutil.PackageVersion(p) - if ver != cutil.UnknownPackage { + ver := version.Package(p) + if ver != version.UnknownPackage { path = p packageVersion = ver break @@ -317,8 +321,8 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo { } dnsPath := filepath.Join(path, "dnsname") - dnsPackage := cutil.PackageVersion(dnsPath) - dnsProgram, err := cutil.ProgramVersionDnsname(dnsPath) + dnsPackage := version.Package(dnsPath) + dnsProgram, err := version.ProgramDnsname(dnsPath) if err != nil { logrus.Infof("Failed to get the dnsname plugin version: %v", err) } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index 45b49bf2..de7af957 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -204,7 +204,10 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo } // rust only support "true" or "false" while go can parse 1 and 0 as well so we need to change it newNetwork.Options[types.NoDefaultRoute] = strconv.FormatBool(val) - + case types.VRFOption: + if len(value) == 0 { + return nil, errors.New("invalid vrf name") + } default: return nil, fmt.Errorf("unsupported bridge network option %s", key) } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go index 17bd863a..0d323db2 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/network.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -15,7 +15,7 @@ import ( "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" + "github.com/containers/common/pkg/version" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" @@ -53,7 +53,7 @@ type netavarkNetwork struct { // ipamDBPath is the path to the ip allocation bolt db ipamDBPath string - // syslog describes whenever the netavark debbug output should be log to the syslog as well. + // syslog describes whenever the netavark debug output should be log to the syslog as well. // This will use logrus to do so, make sure logrus is set up to log to the syslog. syslog bool @@ -93,7 +93,7 @@ type InitConfig struct { // PluginDirs list of directories were netavark plugins are located PluginDirs []string - // Syslog describes whenever the netavark debbug output should be log to the syslog as well. + // Syslog describes whenever the netavark debug output should be log to the syslog as well. 
// This will use logrus to do so, make sure logrus is set up to log to the syslog. Syslog bool } @@ -341,8 +341,8 @@ func (n *netavarkNetwork) DefaultInterfaceName() string { // package version and program version. func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo { path := n.netavarkBinary - packageVersion := cutil.PackageVersion(path) - programVersion, err := cutil.ProgramVersion(path) + packageVersion := version.Package(path) + programVersion, err := version.Program(path) if err != nil { logrus.Infof("Failed to get the netavark version: %v", err) } @@ -354,8 +354,8 @@ func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo { } dnsPath := n.aardvarkBinary - dnsPackage := cutil.PackageVersion(dnsPath) - dnsProgram, err := cutil.ProgramVersion(dnsPath) + dnsPackage := version.Package(dnsPath) + dnsProgram, err := version.Program(dnsPath) if err != nil { logrus.Infof("Failed to get the aardvark version: %v", err) } diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go index a717775a..aeac8d9c 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -81,7 +81,7 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type NetworkRunDir: runDir, NetavarkBinary: netavarkBin, AardvarkBinary: aardvarkBin, - PluginDirs: conf.Network.NetavarkPluginDirs, + PluginDirs: conf.Network.NetavarkPluginDirs.Get(), DefaultNetwork: conf.Network.DefaultNetwork, DefaultSubnet: conf.Network.DefaultSubnet, DefaultsubnetPools: conf.Network.DefaultSubnetPools, @@ -181,7 +181,7 @@ func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { } return cni.NewCNINetworkInterface(&cni.InitConfig{ CNIConfigDir: confDir, - CNIPluginDirs: conf.Network.CNIPluginDirs, + CNIPluginDirs: conf.Network.CNIPluginDirs.Get(), RunDir: conf.Engine.TmpDir, DefaultNetwork: conf.Network.DefaultNetwork, DefaultSubnet: conf.Network.DefaultSubnet, diff --git a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go new file mode 100644 index 00000000..b787a781 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: Apache-2.0 +// +// pasta.go - Start pasta(1) for user-mode connectivity +// +// Copyright (c) 2022 Red Hat GmbH +// Author: Stefano Brivio + +// This file has been imported from the podman repository +// (libpod/networking_pasta_linux.go), for the full history see there. + +package pasta + +import ( + "errors" + "fmt" + "os/exec" + "strings" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +const ( + BinaryName = "pasta" +) + +type SetupOptions struct { + // Config used to get pasta options and binary path via HelperBinariesDir + Config *config.Config + // Netns is the path to the container Netns + Netns string + // Ports that should be forwarded in the container + Ports []types.PortMapping + // ExtraOptions are pasta(1) cli options, these will be appended after the + // pasta options from containers.conf to allow some form of overwrite. + ExtraOptions []string +} + +// Setup start the pasta process for the given netns. +// The pasta binary is looked up in the HelperBinariesDir and $PATH. 
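The Setup function that follows expands each types.PortMapping into pasta(1) forward arguments. For a single hypothetical TCP mapping of host port 8080 to container port 80, the loop below produces "-t 8080-8080:80-80":

    pm := types.PortMapping{
    	HostPort:      8080,
    	ContainerPort: 80,
    	Range:         1,
    	Protocol:      "tcp",
    }
    arg := fmt.Sprintf("%d-%d:%d-%d",
    	pm.HostPort, pm.HostPort+pm.Range-1,
    	pm.ContainerPort, pm.ContainerPort+pm.Range-1)
    // With a non-empty pm.HostIP, the address is prepended as "IP/".
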
+// Note that there is no need any special cleanup logic, the pasta process will +// automatically exit when the netns path is deleted. +func Setup(opts *SetupOptions) error { + NoTCPInitPorts := true + NoUDPInitPorts := true + NoTCPNamespacePorts := true + NoUDPNamespacePorts := true + NoMapGW := true + + path, err := opts.Config.FindHelperBinary(BinaryName, true) + if err != nil { + return fmt.Errorf("could not find pasta, the network namespace can't be configured: %w", err) + } + + cmdArgs := []string{} + cmdArgs = append(cmdArgs, "--config-net") + + for _, i := range opts.Ports { + protocols := strings.Split(i.Protocol, ",") + for _, protocol := range protocols { + var addr string + + if i.HostIP != "" { + addr = fmt.Sprintf("%s/", i.HostIP) + } + + switch protocol { + case "tcp": + cmdArgs = append(cmdArgs, "-t") + case "udp": + cmdArgs = append(cmdArgs, "-u") + default: + return fmt.Errorf("can't forward protocol: %s", protocol) + } + + arg := fmt.Sprintf("%s%d-%d:%d-%d", addr, + i.HostPort, + i.HostPort+i.Range-1, + i.ContainerPort, + i.ContainerPort+i.Range-1) + cmdArgs = append(cmdArgs, arg) + } + } + + // first append options set in the config + cmdArgs = append(cmdArgs, opts.Config.Network.PastaOptions.Get()...) + // then append the ones that were set on the cli + cmdArgs = append(cmdArgs, opts.ExtraOptions...) + + for i, opt := range cmdArgs { + switch opt { + case "-t", "--tcp-ports": + NoTCPInitPorts = false + case "-u", "--udp-ports": + NoUDPInitPorts = false + case "-T", "--tcp-ns": + NoTCPNamespacePorts = false + case "-U", "--udp-ns": + NoUDPNamespacePorts = false + case "--map-gw": + NoMapGW = false + // not an actual pasta(1) option + cmdArgs = append(cmdArgs[:i], cmdArgs[i+1:]...) + } + } + + if NoTCPInitPorts { + cmdArgs = append(cmdArgs, "-t", "none") + } + if NoUDPInitPorts { + cmdArgs = append(cmdArgs, "-u", "none") + } + if NoTCPNamespacePorts { + cmdArgs = append(cmdArgs, "-T", "none") + } + if NoUDPNamespacePorts { + cmdArgs = append(cmdArgs, "-U", "none") + } + if NoMapGW { + cmdArgs = append(cmdArgs, "--no-map-gw") + } + + cmdArgs = append(cmdArgs, "--netns", opts.Netns) + + logrus.Debugf("pasta arguments: %s", strings.Join(cmdArgs, " ")) + + // pasta forks once ready, and quits once we delete the target namespace + _, err = exec.Command(path, cmdArgs...).Output() + if err != nil { + exitErr := &exec.ExitError{} + if errors.As(err, &exitErr) { + return fmt.Errorf("pasta failed with exit code %d:\n%s", + exitErr.ExitCode(), exitErr.Stderr) + } + return fmt.Errorf("failed to start pasta: %w", err) + } + + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/slirp4netns/const.go b/vendor/github.com/containers/common/libnetwork/slirp4netns/const.go new file mode 100644 index 00000000..9dc0c262 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/slirp4netns/const.go @@ -0,0 +1,12 @@ +package slirp4netns + +const ( + ipv6ConfDefaultAcceptDadSysctl = "/proc/sys/net/ipv6/conf/default/accept_dad" + BinaryName = "slirp4netns" + + // defaultMTU the default MTU override + defaultMTU = 65520 + + // default slirp4ns subnet + defaultSubnet = "10.0.2.0/24" +) diff --git a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go new file mode 100644 index 00000000..43ca9780 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go @@ -0,0 +1,752 @@ +//go:build linux +// +build linux + +package slirp4netns + +import 
( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/rootlessport" + "github.com/containers/common/pkg/servicereaper" + "github.com/containers/common/pkg/util" + "github.com/sirupsen/logrus" +) + +type slirpFeatures struct { + HasDisableHostLoopback bool + HasMTU bool + HasEnableSandbox bool + HasEnableSeccomp bool + HasCIDR bool + HasOutboundAddr bool + HasIPv6 bool +} + +type slirp4netnsCmdArg struct { + Proto string `json:"proto,omitempty"` + HostAddr string `json:"host_addr"` + HostPort uint16 `json:"host_port"` + GuestAddr string `json:"guest_addr"` + GuestPort uint16 `json:"guest_port"` +} + +type slirp4netnsCmd struct { + Execute string `json:"execute"` + Args slirp4netnsCmdArg `json:"arguments"` +} + +type networkOptions struct { + cidr string + disableHostLoopback bool + enableIPv6 bool + isSlirpHostForward bool + noPivotRoot bool + mtu int + outboundAddr string + outboundAddr6 string +} + +type SetupOptions struct { + // Config used to get slip4netns path and other default options + Config *config.Config + // ContainerID is the ID of the container + ContainerID string + // Netns path to the netns + Netns string + // Ports the should be forwarded + Ports []types.PortMapping + // ExtraOptions for slirp4netns that were set on the cli + ExtraOptions []string + // Slirp4netnsExitPipeR pipe used to exit the slirp4netns process. + // This is must be the reading end, the writer must be kept open until you want the + // process to exit. For podman, conmon will hold the pipe open. + // It can be set to nil in which case we do not use the pipe exit and the caller + // must use the returned pid to kill the process after it is done. + Slirp4netnsExitPipeR *os.File + // RootlessPortSyncPipe pipe used to exit the rootlessport process. + // Same as Slirp4netnsExitPipeR, except this is only used when ports are given. + RootlessPortExitPipeR *os.File + // Pdeathsig is the signal which is send to slirp4netns process if the calling thread + // exits. The caller is responsible for locking the thread with runtime.LockOSThread(). 
+ Pdeathsig syscall.Signal +} + +// SetupResult return type from Setup() +type SetupResult struct { + // Pid of the created slirp4netns process + Pid int + // Subnet which is used by slirp4netns + Subnet *net.IPNet + // IPv6 whenever Ipv6 is enabled in slirp4netns + IPv6 bool +} + +type logrusDebugWriter struct { + prefix string +} + +func (w *logrusDebugWriter) Write(p []byte) (int, error) { + logrus.Debugf("%s%s", w.prefix, string(p)) + return len(p), nil +} + +func checkSlirpFlags(path string) (*slirpFeatures, error) { + cmd := exec.Command(path, "--help") + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("slirp4netns %q: %w", out, err) + } + return &slirpFeatures{ + HasDisableHostLoopback: strings.Contains(string(out), "--disable-host-loopback"), + HasMTU: strings.Contains(string(out), "--mtu"), + HasEnableSandbox: strings.Contains(string(out), "--enable-sandbox"), + HasEnableSeccomp: strings.Contains(string(out), "--enable-seccomp"), + HasCIDR: strings.Contains(string(out), "--cidr"), + HasOutboundAddr: strings.Contains(string(out), "--outbound-addr"), + HasIPv6: strings.Contains(string(out), "--enable-ipv6"), + }, nil +} + +func parseNetworkOptions(config *config.Config, extraOptions []string) (*networkOptions, error) { + options := make([]string, 0, len(config.Engine.NetworkCmdOptions.Get())+len(extraOptions)) + options = append(options, config.Engine.NetworkCmdOptions.Get()...) + options = append(options, extraOptions...) + opts := &networkOptions{ + // overwrite defaults + disableHostLoopback: true, + mtu: defaultMTU, + noPivotRoot: config.Engine.NoPivotRoot, + enableIPv6: true, + } + for _, o := range options { + parts := strings.SplitN(o, "=", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("unknown option for slirp4netns: %q", o) + } + option, value := parts[0], parts[1] + switch option { + case "cidr": + ipv4, _, err := net.ParseCIDR(value) + if err != nil || ipv4.To4() == nil { + return nil, fmt.Errorf("invalid cidr %q", value) + } + opts.cidr = value + case "port_handler": + switch value { + case "slirp4netns": + opts.isSlirpHostForward = true + case "rootlesskit": + opts.isSlirpHostForward = false + default: + return nil, fmt.Errorf("unknown port_handler for slirp4netns: %q", value) + } + case "allow_host_loopback": + switch value { + case "true": + opts.disableHostLoopback = false + case "false": + opts.disableHostLoopback = true + default: + return nil, fmt.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value) + } + case "enable_ipv6": + switch value { + case "true": + opts.enableIPv6 = true + case "false": + opts.enableIPv6 = false + default: + return nil, fmt.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value) + } + case "outbound_addr": + ipv4 := net.ParseIP(value) + if ipv4 == nil || ipv4.To4() == nil { + _, err := net.InterfaceByName(value) + if err != nil { + return nil, fmt.Errorf("invalid outbound_addr %q", value) + } + } + opts.outboundAddr = value + case "outbound_addr6": + ipv6 := net.ParseIP(value) + if ipv6 == nil || ipv6.To4() != nil { + _, err := net.InterfaceByName(value) + if err != nil { + return nil, fmt.Errorf("invalid outbound_addr6: %q", value) + } + } + opts.outboundAddr6 = value + case "mtu": + var err error + opts.mtu, err = strconv.Atoi(value) + if opts.mtu < 68 || err != nil { + return nil, fmt.Errorf("invalid mtu %q", value) + } + default: + return nil, fmt.Errorf("unknown option for slirp4netns: %q", o) + } + } + return opts, nil +} + +func createBasicSlirpCmdArgs(options 
+
+func createBasicSlirpCmdArgs(options *networkOptions, features *slirpFeatures) ([]string, error) {
+	cmdArgs := []string{}
+	if options.disableHostLoopback && features.HasDisableHostLoopback {
+		cmdArgs = append(cmdArgs, "--disable-host-loopback")
+	}
+	if options.mtu > -1 && features.HasMTU {
+		cmdArgs = append(cmdArgs, fmt.Sprintf("--mtu=%d", options.mtu))
+	}
+	if !options.noPivotRoot && features.HasEnableSandbox {
+		cmdArgs = append(cmdArgs, "--enable-sandbox")
+	}
+	if features.HasEnableSeccomp {
+		cmdArgs = append(cmdArgs, "--enable-seccomp")
+	}
+
+	if options.cidr != "" {
+		if !features.HasCIDR {
+			return nil, fmt.Errorf("cidr not supported")
+		}
+		cmdArgs = append(cmdArgs, fmt.Sprintf("--cidr=%s", options.cidr))
+	}
+
+	if options.enableIPv6 {
+		if !features.HasIPv6 {
+			return nil, fmt.Errorf("enable_ipv6 not supported")
+		}
+		cmdArgs = append(cmdArgs, "--enable-ipv6")
+	}
+
+	if options.outboundAddr != "" {
+		if !features.HasOutboundAddr {
+			return nil, fmt.Errorf("outbound_addr not supported")
+		}
+		cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", options.outboundAddr))
+	}
+
+	if options.outboundAddr6 != "" {
+		if !features.HasOutboundAddr || !features.HasIPv6 {
+			return nil, fmt.Errorf("outbound_addr6 not supported")
+		}
+		if !options.enableIPv6 {
+			return nil, fmt.Errorf("enable_ipv6=true is required for outbound_addr6")
+		}
+		cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", options.outboundAddr6))
+	}
+
+	return cmdArgs, nil
+}
+
+// Setup can be called in rootful as well as in rootless mode.
+// It spawns the slirp4netns process and sets up port forwarding if ports are given.
+func Setup(opts *SetupOptions) (*SetupResult, error) {
+	path := opts.Config.Engine.NetworkCmdPath
+	if path == "" {
+		var err error
+		path, err = opts.Config.FindHelperBinary(BinaryName, true)
+		if err != nil {
+			return nil, fmt.Errorf("could not find slirp4netns, the network namespace can't be configured: %w", err)
+		}
+	}
+
+	syncR, syncW, err := os.Pipe()
+	if err != nil {
+		return nil, fmt.Errorf("failed to open pipe: %w", err)
+	}
+	defer closeQuiet(syncR)
+	defer closeQuiet(syncW)
+
+	havePortMapping := len(opts.Ports) > 0
+	logPath := filepath.Join(opts.Config.Engine.TmpDir, fmt.Sprintf("slirp4netns-%s.log", opts.ContainerID))
+
+	netOptions, err := parseNetworkOptions(opts.Config, opts.ExtraOptions)
+	if err != nil {
+		return nil, err
+	}
+	slirpFeatures, err := checkSlirpFlags(path)
+	if err != nil {
+		return nil, fmt.Errorf("checking slirp4netns binary %s: %w", path, err)
+	}
+	cmdArgs, err := createBasicSlirpCmdArgs(netOptions, slirpFeatures)
+	if err != nil {
+		return nil, err
+	}
+
+	// The slirp4netns arguments being passed are described as follows,
+	// from the slirp4netns documentation: https://github.com/rootless-containers/slirp4netns
+	// -c, --configure     brings up the tap interface
+	// -e, --exit-fd=FD    specify the FD for terminating slirp4netns
+	// -r, --ready-fd=FD   specify the FD to write to when the initialization steps are finished
+	cmdArgs = append(cmdArgs, "-c", "-r", "3")
+	if opts.Slirp4netnsExitPipeR != nil {
+		cmdArgs = append(cmdArgs, "-e", "4")
+	}
+
+	var apiSocket string
+	if havePortMapping && netOptions.isSlirpHostForward {
+		apiSocket = filepath.Join(opts.Config.Engine.TmpDir, fmt.Sprintf("%s.net", opts.ContainerID))
+		cmdArgs = append(cmdArgs, "--api-socket", apiSocket)
+	}
+
+	cmdArgs = append(cmdArgs, "--netns-type=path", opts.Netns, "tap0")
+
+	cmd := exec.Command(path, cmdArgs...)
+ logrus.Debugf("slirp4netns command: %s", strings.Join(cmd.Args, " ")) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + Pdeathsig: opts.Pdeathsig, + } + + // workaround for https://github.com/rootless-containers/slirp4netns/pull/153 + if !netOptions.noPivotRoot && slirpFeatures.HasEnableSandbox { + cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNS + cmd.SysProcAttr.Unshareflags = syscall.CLONE_NEWNS + } + + // Leak one end of the pipe in slirp4netns, the other will be sent to conmon + cmd.ExtraFiles = append(cmd.ExtraFiles, syncW) + if opts.Slirp4netnsExitPipeR != nil { + cmd.ExtraFiles = append(cmd.ExtraFiles, opts.Slirp4netnsExitPipeR) + } + + logFile, err := os.Create(logPath) + if err != nil { + return nil, fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err) + } + defer logFile.Close() + // Unlink immediately the file so we won't need to worry about cleaning it up later. + // It is still accessible through the open fd logFile. + if err := os.Remove(logPath); err != nil { + return nil, fmt.Errorf("delete file %s: %w", logPath, err) + } + cmd.Stdout = logFile + cmd.Stderr = logFile + + var slirpReadyWg, netnsReadyWg *sync.WaitGroup + if netOptions.enableIPv6 { + // use two wait groups to make sure we set the sysctl before + // starting slirp and reset it only after slirp is ready + slirpReadyWg = &sync.WaitGroup{} + netnsReadyWg = &sync.WaitGroup{} + slirpReadyWg.Add(1) + netnsReadyWg.Add(1) + + go func() { + err := ns.WithNetNSPath(opts.Netns, func(_ ns.NetNS) error { + // Duplicate Address Detection slows the ipv6 setup down for 1-2 seconds. + // Since slirp4netns is run in its own namespace and not directly routed + // we can skip this to make the ipv6 address immediately available. + // We change the default to make sure the slirp tap interface gets the + // correct value assigned so DAD is disabled for it + // Also make sure to change this value back to the original after slirp4netns + // is ready in case users rely on this sysctl. + orgValue, err := os.ReadFile(ipv6ConfDefaultAcceptDadSysctl) + if err != nil { + netnsReadyWg.Done() + // on ipv6 disabled systems the sysctl does not exist + // so we should not error + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + err = os.WriteFile(ipv6ConfDefaultAcceptDadSysctl, []byte("0"), 0o644) + netnsReadyWg.Done() + if err != nil { + return err + } + + // wait until slirp4nets is ready before resetting this value + slirpReadyWg.Wait() + return os.WriteFile(ipv6ConfDefaultAcceptDadSysctl, orgValue, 0o644) + }) + if err != nil { + logrus.Warnf("failed to set net.ipv6.conf.default.accept_dad sysctl: %v", err) + } + }() + + // wait until we set the sysctl + netnsReadyWg.Wait() + } + + if err := cmd.Start(); err != nil { + if netOptions.enableIPv6 { + slirpReadyWg.Done() + } + return nil, fmt.Errorf("failed to start slirp4netns process: %w", err) + } + defer func() { + servicereaper.AddPID(cmd.Process.Pid) + if err := cmd.Process.Release(); err != nil { + logrus.Errorf("Unable to release command process: %q", err) + } + }() + + err = waitForSync(syncR, cmd, logFile, 1*time.Second) + if netOptions.enableIPv6 { + slirpReadyWg.Done() + } + if err != nil { + return nil, err + } + + // Set a default slirp subnet. 
+	_, slirpSubnet, _ := net.ParseCIDR(defaultSubnet)
+
+	// Set the slirp subnet now that we are pretty sure the command executed
+	if netOptions.cidr != "" {
+		ipv4, ipv4network, err := net.ParseCIDR(netOptions.cidr)
+		if err != nil || ipv4.To4() == nil {
+			return nil, fmt.Errorf("invalid cidr %q", netOptions.cidr)
+		}
+		slirpSubnet = ipv4network
+	}
+
+	if havePortMapping {
+		if netOptions.isSlirpHostForward {
+			err = setupRootlessPortMappingViaSlirp(opts.Ports, cmd, apiSocket)
+		} else {
+			err = SetupRootlessPortMappingViaRLK(opts, slirpSubnet, nil)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &SetupResult{
+		Pid:    cmd.Process.Pid,
+		Subnet: slirpSubnet,
+		IPv6:   netOptions.enableIPv6,
+	}, nil
+}
+
+// GetIP returns the expected slirp ipv4 address based on subnet. If subnet is nil, the default subnet is used.
+// Reference: https://github.com/rootless-containers/slirp4netns/blob/master/slirp4netns.1.md#description
+func GetIP(subnet *net.IPNet) (*net.IP, error) {
+	_, slirpSubnet, _ := net.ParseCIDR(defaultSubnet)
+	if subnet != nil {
+		slirpSubnet = subnet
+	}
+	expectedIP, err := addToIP(slirpSubnet, uint32(100))
+	if err != nil {
+		return nil, fmt.Errorf("calculating expected ip for slirp4netns: %w", err)
+	}
+	return expectedIP, nil
+}
+
+// GetGateway returns the expected slirp gateway ipv4 address based on subnet.
+// Reference: https://github.com/rootless-containers/slirp4netns/blob/master/slirp4netns.1.md#description
+func GetGateway(subnet *net.IPNet) (*net.IP, error) {
+	_, slirpSubnet, _ := net.ParseCIDR(defaultSubnet)
+	if subnet != nil {
+		slirpSubnet = subnet
+	}
+	expectedGatewayIP, err := addToIP(slirpSubnet, uint32(2))
+	if err != nil {
+		return nil, fmt.Errorf("calculating expected gateway ip for slirp4netns: %w", err)
+	}
+	return expectedGatewayIP, nil
+}
+
+// GetDNS returns the expected slirp DNS ipv4 address based on subnet.
+// Reference: https://github.com/rootless-containers/slirp4netns/blob/master/slirp4netns.1.md#description
+func GetDNS(subnet *net.IPNet) (*net.IP, error) {
+	_, slirpSubnet, _ := net.ParseCIDR(defaultSubnet)
+	if subnet != nil {
+		slirpSubnet = subnet
+	}
+	expectedDNSIP, err := addToIP(slirpSubnet, uint32(3))
+	if err != nil {
+		return nil, fmt.Errorf("calculating expected dns ip for slirp4netns: %w", err)
+	}
+	return expectedDNSIP, nil
+}
+
+// addToIP is a helper to calculate slirp ip address offsets.
+// Adapted from: https://github.com/signalsciences/ipv4/blob/master/int.go#L12-L24
+func addToIP(subnet *net.IPNet, offset uint32) (*net.IP, error) {
+	// Normalize to the 4-byte representation: subnet.IP may be stored as a
+	// 16-byte slice, in which case indexing the first four bytes directly
+	// would read the wrong octets.
+	ipFixed := subnet.IP.To4()
+
+	ipInteger := uint32(ipFixed[3]) | uint32(ipFixed[2])<<8 | uint32(ipFixed[1])<<16 | uint32(ipFixed[0])<<24
+	ipNewRaw := ipInteger + offset
+	// Avoid overflows
+	if ipNewRaw < ipInteger {
+		return nil, fmt.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
+	}
+	ipNew := net.IPv4(byte(ipNewRaw>>24), byte(ipNewRaw>>16&0xFF), byte(ipNewRaw>>8&0xFF), byte(ipNewRaw&0xFF))
+	if !subnet.Contains(ipNew) {
+		return nil, fmt.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
+	}
+	return &ipNew, nil
+}
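As a worked example of the offset arithmetic above, applied to the usual slirp4netns default of 10.0.2.0/24 (defaultSubnet is defined elsewhere in this package; 10.0.2.0/24 matches the slirp4netns documentation):

// Editor's illustration of the addToIP math for offsets 2 (gateway), 3 (DNS)
// and 100 (container address).
package main

import (
	"fmt"
	"net"
)

func main() {
	_, subnet, _ := net.ParseCIDR("10.0.2.0/24")
	ip := subnet.IP.To4()
	base := uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])
	for _, off := range []uint32{2, 3, 100} {
		v := base + off
		fmt.Println(net.IPv4(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)))
	}
	// Output: 10.0.2.2, 10.0.2.3, 10.0.2.100
}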
+
+func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout time.Duration) error {
+	prog := filepath.Base(cmd.Path)
+	if len(cmd.Args) > 0 {
+		prog = cmd.Args[0]
+	}
+	b := make([]byte, 16)
+	for {
+		if err := syncR.SetDeadline(time.Now().Add(timeout)); err != nil {
+			return fmt.Errorf("setting %s pipe timeout: %w", prog, err)
+		}
+		// FIXME: return err as soon as proc exits, without waiting for timeout
+		_, err := syncR.Read(b)
+		if err == nil {
+			break
+		}
+		if errors.Is(err, os.ErrDeadlineExceeded) {
+			// Check if the process is still running.
+			var status syscall.WaitStatus
+			pid, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+			if err != nil {
+				return fmt.Errorf("failed to read %s process status: %w", prog, err)
+			}
+			if pid != cmd.Process.Pid {
+				continue
+			}
+			if status.Exited() {
+				// Seek to the beginning of the file and read all of its content
+				if _, err := logFile.Seek(0, 0); err != nil {
+					logrus.Errorf("Could not seek log file: %q", err)
+				}
+				logContent, err := io.ReadAll(logFile)
+				if err != nil {
+					return fmt.Errorf("%s failed: %w", prog, err)
+				}
+				return fmt.Errorf("%s failed: %q", prog, logContent)
+			}
+			if status.Signaled() {
+				return fmt.Errorf("%s killed by signal", prog)
+			}
+			continue
+		}
+		return fmt.Errorf("failed to read from %s sync pipe: %w", prog, err)
+	}
+	return nil
+}
+
+func SetupRootlessPortMappingViaRLK(opts *SetupOptions, slirpSubnet *net.IPNet, netStatus map[string]types.StatusBlock) error {
+	syncR, syncW, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("failed to open pipe: %w", err)
+	}
+	defer closeQuiet(syncR)
+	defer closeQuiet(syncW)
+
+	logPath := filepath.Join(opts.Config.Engine.TmpDir, fmt.Sprintf("rootlessport-%s.log", opts.ContainerID))
+	logFile, err := os.Create(logPath)
+	if err != nil {
+		return fmt.Errorf("failed to open rootlessport log file %s: %w", logPath, err)
+	}
+	defer logFile.Close()
+	// Unlink the file immediately so we won't need to worry about cleaning it up later.
+	// It is still accessible through the open fd logFile.
+	if err := os.Remove(logPath); err != nil {
+		return fmt.Errorf("delete file %s: %w", logPath, err)
+	}
+
+	childIP := GetRootlessPortChildIP(slirpSubnet, netStatus)
+	cfg := rootlessport.Config{
+		Mappings:    opts.Ports,
+		NetNSPath:   opts.Netns,
+		ExitFD:      3,
+		ReadyFD:     4,
+		TmpDir:      opts.Config.Engine.TmpDir,
+		ChildIP:     childIP,
+		ContainerID: opts.ContainerID,
+		RootlessCNI: netStatus != nil,
+	}
+	cfgJSON, err := json.Marshal(cfg)
+	if err != nil {
+		return err
+	}
+	cfgR := bytes.NewReader(cfgJSON)
+	var stdout bytes.Buffer
+	path, err := opts.Config.FindHelperBinary(rootlessport.BinaryName, false)
+	if err != nil {
+		return err
+	}
+	cmd := exec.Command(path)
+	cmd.Args = []string{rootlessport.BinaryName}
+
+	// Leak one end of the pipe in the rootlessport process; the other will be sent to conmon
+	cmd.ExtraFiles = append(cmd.ExtraFiles, opts.RootlessPortExitPipeR, syncW)
+	cmd.Stdin = cfgR
+	// stdout is for the human-readable error, stderr is for the debug log
+	cmd.Stdout = &stdout
+	cmd.Stderr = io.MultiWriter(logFile, &logrusDebugWriter{"rootlessport: "})
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Setpgid: true,
+	}
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("failed to start rootlessport process: %w", err)
+	}
+	defer func() {
+		servicereaper.AddPID(cmd.Process.Pid)
+		if err := cmd.Process.Release(); err != nil {
+			logrus.Errorf("Unable to release rootlessport process: %q", err)
+		}
+	}()
+	if err := waitForSync(syncR, cmd, logFile, 3*time.Second); err != nil {
+		stdoutStr := stdout.String()
+		if stdoutStr != "" {
+			// err contains the full debug log, which is too verbose, so return stdoutStr instead
+			logrus.Debug(err)
+			return fmt.Errorf("rootlessport %s", strings.TrimSuffix(stdoutStr, "\n"))
+		}
+		return err
+	}
+	logrus.Debug("rootlessport is ready")
+	return nil
+}
+
+func setupRootlessPortMappingViaSlirp(ports []types.PortMapping, cmd *exec.Cmd, apiSocket string) (err error) {
+	const pidWaitTimeout = 60 * time.Second
+	chWait := make(chan error)
+	go func() {
+		interval := 25 * time.Millisecond
+		for i := time.Duration(0); i < pidWaitTimeout; i += interval {
+			// Check if the process is still running.
+			var status syscall.WaitStatus
+			pid, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+			if err != nil {
+				break
+			}
+			if pid != cmd.Process.Pid {
+				continue
+			}
+			if status.Exited() || status.Signaled() {
+				chWait <- fmt.Errorf("slirp4netns exited with status %d", status.ExitStatus())
+			}
+			time.Sleep(interval)
+		}
+	}()
+	defer close(chWait)
+
+	// wait until the API socket file appears before trying to use it
+	if _, err := util.WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil {
+		return fmt.Errorf("waiting for slirp4netns to create the api socket file %s: %w", apiSocket, err)
+	}
+
+	// For each port we want to add, we need to open a connection to the slirp4netns
+	// control socket and send the add_hostfwd command.
+	for _, port := range ports {
+		protocols := strings.Split(port.Protocol, ",")
+		for _, protocol := range protocols {
+			hostIP := port.HostIP
+			if hostIP == "" {
+				hostIP = "0.0.0.0"
+			}
+			for i := uint16(0); i < port.Range; i++ {
+				if err := openSlirp4netnsPort(apiSocket, protocol, hostIP, port.HostPort+i, port.ContainerPort+i); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	logrus.Debug("slirp4netns port-forwarding setup via add_hostfwd is ready")
+	return nil
+}
+
+// openSlirp4netnsPort sends the slirp4netns API query to the given socket
+func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport uint16) error {
+	conn, err := net.Dial("unix", apiSocket)
+	if err != nil {
+		return fmt.Errorf("cannot open connection to %s: %w", apiSocket, err)
+	}
+	defer func() {
+		if err := conn.Close(); err != nil {
+			logrus.Errorf("Unable to close slirp4netns connection: %q", err)
+		}
+	}()
+	apiCmd := slirp4netnsCmd{
+		Execute: "add_hostfwd",
+		Args: slirp4netnsCmdArg{
+			Proto:     proto,
+			HostAddr:  hostip,
+			HostPort:  hostport,
+			GuestPort: guestport,
+		},
+	}
+	// Create the JSON payload and send it. Mark the end of the request by shutting down writes
+	// on the socket, as requested by slirp4netns.
+	data, err := json.Marshal(&apiCmd)
+	if err != nil {
+		return fmt.Errorf("cannot marshal JSON for slirp4netns: %w", err)
+	}
+	if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil {
+		return fmt.Errorf("cannot write to control socket %s: %w", apiSocket, err)
+	}
+	if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
+		return fmt.Errorf("cannot shutdown the socket %s: %w", apiSocket, err)
+	}
+	buf := make([]byte, 2048)
+	readLength, err := conn.Read(buf)
+	if err != nil {
+		return fmt.Errorf("cannot read from control socket %s: %w", apiSocket, err)
+	}
+	// If there is no 'error' key in the received JSON data, then the operation was
+	// successful.
+	var y map[string]interface{}
+	if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
+		return fmt.Errorf("parsing error status from slirp4netns: %w", err)
+	}
+	if e, found := y["error"]; found {
+		return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e)
+	}
+	return nil
+}
+
+func GetRootlessPortChildIP(slirpSubnet *net.IPNet, netStatus map[string]types.StatusBlock) string {
+	if slirpSubnet != nil {
+		slirp4netnsIP, err := GetIP(slirpSubnet)
+		if err != nil {
+			return ""
+		}
+		return slirp4netnsIP.String()
+	}
+
+	var ipv6 net.IP
+	for _, status := range netStatus {
+		for _, netInt := range status.Interfaces {
+			for _, netAddress := range netInt.Subnets {
+				ipv4 := netAddress.IPNet.IP.To4()
+				if ipv4 != nil {
+					return ipv4.String()
+				}
+				ipv6 = netAddress.IPNet.IP
+			}
+		}
+	}
+	if ipv6 != nil {
+		return ipv6.String()
+	}
+	return ""
+}
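For reference, a standalone sketch of the wire format that openSlirp4netnsPort produces; the struct definitions are repeated only to keep the example self-contained, and the reply semantics follow the comment above (a response without an "error" key means success):

// Editor's illustration: the add_hostfwd request for forwarding host
// 0.0.0.0:8080 to guest port 80 over TCP.
package main

import (
	"encoding/json"
	"fmt"
)

type arg struct {
	Proto     string `json:"proto,omitempty"`
	HostAddr  string `json:"host_addr"`
	HostPort  uint16 `json:"host_port"`
	GuestAddr string `json:"guest_addr"`
	GuestPort uint16 `json:"guest_port"`
}

type request struct {
	Execute string `json:"execute"`
	Args    arg    `json:"arguments"`
}

func main() {
	data, _ := json.Marshal(&request{
		Execute: "add_hostfwd",
		Args:    arg{Proto: "tcp", HostAddr: "0.0.0.0", HostPort: 8080, GuestPort: 80},
	})
	fmt.Println(string(data))
	// {"execute":"add_hostfwd","arguments":{"proto":"tcp","host_addr":"0.0.0.0","host_port":8080,"guest_addr":"","guest_port":80}}
}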
+// closeQuiet closes a file and logs any error. Should only be used within
+// a defer.
+func closeQuiet(f *os.File) {
+	if err := f.Close(); err != nil {
+		logrus.Errorf("Unable to close file %s: %q", f.Name(), err)
+	}
+}
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
index 83103ef6..a9161820 100644
--- a/vendor/github.com/containers/common/libnetwork/types/const.go
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -43,6 +43,7 @@ const (
 	MetricOption = "metric"
 	NoDefaultRoute = "no_default_route"
 	BclimOption = "bclim"
+	VRFOption = "vrf"
 )
 
 type NetworkBackend string
diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go
index 782c5d2b..70f90918 100644
--- a/vendor/github.com/containers/common/libnetwork/util/filters.go
+++ b/vendor/github.com/containers/common/libnetwork/util/filters.go
@@ -38,7 +38,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err
 	case "id":
 		// matches part of one id
 		return func(net types.Network) bool {
-			return util.FilterID(net.ID, filterValues)
+			return filters.FilterID(net.ID, filterValues)
 		}, nil
 
 	// TODO: add dns enabled, internal filter
@@ -67,7 +67,7 @@ func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc
 		}, nil
 	case "label!":
 		return func(net types.Network) bool {
-			return !filters.MatchLabelFilters(filterValues, net.Labels)
+			return filters.MatchNegatedLabelFilters(filterValues, net.Labels)
 		}, nil
 	case "until":
 		until, err := filters.ComputeUntilTimestamp(filterValues)
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
index 7ba63ba7..435422c2 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
@@ -212,6 +212,11 @@ func parseAAParserVersion(output string) (int, error) {
 	words := strings.Split(lines[0], " ")
 	version := words[len(words)-1]
 
+	// trim the "-beta1" suffix from version="3.0.0-beta1" if present
+	version = strings.SplitN(version, "-", 2)[0]
+	// also trim the "~..." suffix used historically (https://gitlab.com/apparmor/apparmor/-/commit/bca67d3d27d219d11ce8c9cc70612bd637f88c10)
+	version = strings.SplitN(version, "~", 2)[0]
+
 	// split by major minor version
 	v := strings.Split(version, ".")
 	if len(v) == 0 || len(v) > 3 {
diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go
new file mode 100644
index 00000000..6536d0f2
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/auth/auth.go
@@ -0,0 +1,402 @@
+package auth
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	passwd "github.com/containers/common/pkg/password"
+	"github.com/containers/image/v5/docker"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/pkg/docker/config"
+	"github.com/containers/image/v5/pkg/sysregistriesv2"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/homedir"
+	"github.com/sirupsen/logrus"
+)
+
+// ErrNewCredentialsInvalid means that the new user-provided credentials are
+// not accepted by the registry.
+type ErrNewCredentialsInvalid struct {
+	underlyingError error
+	message         string
+}
+
+// Error returns the error message as a string.
+func (e ErrNewCredentialsInvalid) Error() string {
+	return e.message
+}
+
+// Unwrap returns the underlying error.
+func (e ErrNewCredentialsInvalid) Unwrap() error {
+	return e.underlyingError
+}
+
+// GetDefaultAuthFile returns the env value REGISTRY_AUTH_FILE as the default
+// --authfile path used in multiple --authfile flag definitions.
+// It falls back to DOCKER_CONFIG if the REGISTRY_AUTH_FILE environment variable is not set.
+//
+// WARNINGS:
+// - In almost all invocations, expect this function to return ""; so it can not be used
+//   for directly accessing the file.
+// - Use this only for commands that _read_ credentials, not write them.
+//   The path may refer to github.com/containers auth.json, or to Docker config.json,
+//   and the distinction is lost; writing auth.json data to config.json may not be consumable by Docker,
+//   or it may overwrite and discard unrelated Docker configuration set by the user.
+func GetDefaultAuthFile() string {
+	// Keep this in sync with the default logic in systemContextWithOptions!
+
+	if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
+		return authfile
+	}
+	// This pre-existing behavior is not conceptually consistent:
+	// If users have a ~/.docker/config.json in the default path, and no environment variable
+	// set, we read auth.json first, falling back to config.json;
+	// but if DOCKER_CONFIG is set, we read only config.json in that path, and we don’t read auth.json at all.
+	if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" {
+		return filepath.Join(authEnv, "config.json")
+	}
+	return ""
+}
+
+// CheckAuthFile validates a path option, failing if the option is set but the referenced file is not accessible.
+func CheckAuthFile(pathOption string) error {
+	if pathOption == "" {
+		return nil
+	}
+	if _, err := os.Stat(pathOption); err != nil {
+		return fmt.Errorf("credential file is not accessible: %w", err)
+	}
+	return nil
+}
+
+// systemContextWithOptions returns a version of sys
+// updated with authFile, dockerCompatAuthFile and certDir values (if they are not "").
+// NOTE: this is a shallow copy that can be used and updated, but may share
+// data with the original parameter.
+func systemContextWithOptions(sys *types.SystemContext, authFile, dockerCompatAuthFile, certDir string) (*types.SystemContext, error) {
+	if sys != nil {
+		sysCopy := *sys
+		sys = &sysCopy
+	} else {
+		sys = &types.SystemContext{}
+	}
+
+	defaultDockerConfigPath := filepath.Join(homedir.Get(), ".docker", "config.json")
+	switch {
+	case authFile != "" && dockerCompatAuthFile != "":
+		return nil, errors.New("options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")
+	case authFile != "":
+		if authFile == defaultDockerConfigPath {
+			logrus.Warn("saving credentials to ~/.docker/config.json, but not using Docker-compatible file format")
+		}
+		sys.AuthFilePath = authFile
+	case dockerCompatAuthFile != "":
+		sys.DockerCompatAuthFilePath = dockerCompatAuthFile
+	default:
+		// Keep this in sync with GetDefaultAuthFile()!
+		//
+		// Note that c/image does not natively implement the REGISTRY_AUTH_FILE
+		// variable, so not all callers look for credentials in this location.
+		if authFileVar := os.Getenv("REGISTRY_AUTH_FILE"); authFileVar != "" {
+			if authFileVar == defaultDockerConfigPath {
+				logrus.Warn("$REGISTRY_AUTH_FILE points to ~/.docker/config.json, but the file format is not fully compatible; use the Docker-compatible file path option instead")
+			}
+			sys.AuthFilePath = authFileVar
+		} else if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+			// This preserves pre-existing _inconsistent_ behavior:
+			// If the Docker configuration exists in the default ~/.docker/config.json location,
+			// we DO NOT write to it; instead, we update auth.json in the default path.
+			// Only if the user explicitly sets DOCKER_CONFIG do we write to that config.json.
+			sys.DockerCompatAuthFilePath = filepath.Join(dockerConfig, "config.json")
+		}
+	}
+	if certDir != "" {
+		sys.DockerCertPath = certDir
+	}
+	return sys, nil
+}
+
+// Login implements a “log in” command with the provided opts and args,
+// reading the password from opts.Stdin or the options in opts.
+func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error {
+	systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, opts.CertDir)
+	if err != nil {
+		return err
+	}
+
+	var key, registry string
+	switch len(args) {
+	case 0:
+		if !opts.AcceptUnspecifiedRegistry {
+			return errors.New("please provide a registry to log in to")
+		}
+		if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
+			return err
+		}
+		registry = key
+		logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
+
+	case 1:
+		key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories)
+		if err != nil {
+			return err
+		}
+
+	default:
+		return errors.New("login accepts only one registry to log in to")
+	}
+
+	authConfig, err := config.GetCredentials(systemContext, key)
+	if err != nil {
+		return fmt.Errorf("get credentials: %w", err)
+	}
+
+	if opts.GetLoginSet {
+		if authConfig.Username == "" {
+			return fmt.Errorf("not logged into %s", key)
+		}
+		fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
+		return nil
+	}
+	if authConfig.IdentityToken != "" {
+		return errors.New("currently logged in, auth file contains an Identity token")
+	}
+
+	password := opts.Password
+	if opts.StdinPassword {
+		var stdinPasswordStrBuilder strings.Builder
+		if opts.Password != "" {
+			return errors.New("Can't specify both --password-stdin and --password")
+		}
+		if opts.Username == "" {
+			return errors.New("Must provide --username with --password-stdin")
+		}
+		scanner := bufio.NewScanner(opts.Stdin)
+		for scanner.Scan() {
+			fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text())
+		}
+		password = stdinPasswordStrBuilder.String()
+	}
+
+	// If no username and no password is specified, try to use existing ones.
+	if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" {
+		fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key)
+		if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil {
+			fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry)
+			return nil
+		}
+		fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password")
+	}
+
+	username, password, err := getUserAndPass(opts, password, authConfig.Username)
+	if err != nil {
+		return fmt.Errorf("getting username and password: %w", err)
+	}
+
+	if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
+		if !opts.NoWriteBack {
+			// Write the new credentials to the authfile
+			desc, err := config.SetCredentials(systemContext, key, username, password)
+			if err != nil {
+				return err
+			}
+			if opts.Verbose {
+				fmt.Fprintln(opts.Stdout, "Used: ", desc)
+			}
+		}
+		fmt.Fprintln(opts.Stdout, "Login Succeeded!")
+		return nil
+	}
+	if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
+		logrus.Debugf("error logging into %q: %v", key, unauthorized)
+		return ErrNewCredentialsInvalid{
+			underlyingError: err,
+			message:         fmt.Sprintf("logging into %q: invalid username/password", key),
+		}
+	}
+	return fmt.Errorf("authenticating creds for %q: %w", key, err)
+}
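A hypothetical in-package test pinning down the key/registry split implemented by parseCredentialsKey below; the cases mirror the comments in the function, and quay.io is just an example registry:

// Editor's sketch (assumed test, not part of the vendored file).
package auth

import "testing"

func TestParseCredentialsKey(t *testing.T) {
	for _, tc := range []struct {
		arg, key, registry string
		acceptRepositories bool
	}{
		{"quay.io", "quay.io", "quay.io", false},
		{"https://quay.io/ns/repo", "quay.io", "quay.io", false}, // URL → host[:port] only
		{"quay.io/ns/repo", "quay.io/ns/repo", "quay.io", true},  // namespaced key is kept
	} {
		key, registry, err := parseCredentialsKey(tc.arg, tc.acceptRepositories)
		if err != nil || key != tc.key || registry != tc.registry {
			t.Errorf("%q: got (%q, %q, %v)", tc.arg, key, registry, err)
		}
	}
}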
+
+// parseCredentialsKey turns the provided argument into a valid credential key
+// and computes the registry part.
+func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry string, err error) {
+	// URL arguments are replaced with their host[:port] parts.
+	key, err = replaceURLByHostPort(arg)
+	if err != nil {
+		return "", "", err
+	}
+
+	split := strings.Split(key, "/")
+	registry = split[0]
+
+	if !acceptRepositories {
+		return registry, registry, nil
+	}
+
+	// Return early if the key isn't namespaced or uses an http{s} prefix.
+	if registry == key {
+		return key, registry, nil
+	}
+
+	// Sanity-check that the key looks reasonable (e.g. doesn't use invalid characters),
+	// and does not contain a tag or digest.
+	// WARNING: ref.Named() MUST NOT be used to compute key, because
+	// reference.ParseNormalizedNamed() turns docker.io/vendor into docker.io/library/vendor.
+	// Ideally c/image should provide dedicated validation functionality.
+	ref, err := reference.ParseNormalizedNamed(key)
+	if err != nil {
+		return "", "", fmt.Errorf("parse reference from %q: %w", key, err)
+	}
+	if !reference.IsNameOnly(ref) {
+		return "", "", fmt.Errorf("reference %q contains tag or digest", ref.String())
+	}
+	refRegistry := reference.Domain(ref)
+	if refRegistry != registry { // This should never happen, check just to make sure
+		return "", "", fmt.Errorf("internal error: key %q registry mismatch, %q vs. %q", key, ref, refRegistry)
+	}
+
+	return key, registry, nil
+}
+
+// replaceURLByHostPort: if the specified string starts with http{s}, it is
+// replaced with its host[:port] part and everything else is stripped.
+// Otherwise, the string is returned as is.
+func replaceURLByHostPort(repository string) (string, error) {
+	if !strings.HasPrefix(repository, "https://") && !strings.HasPrefix(repository, "http://") {
+		return repository, nil
+	}
+	u, err := url.Parse(repository)
+	if err != nil {
+		return "", fmt.Errorf("trimming http{s} prefix: %v", err)
+	}
+	return u.Host, nil
+}
+
+// getUserAndPass gets the username and password from STDIN if not given
+// using the -u and -p flags. If the username prompt is left empty, the
+// displayed userFromAuthFile will be used instead.
+func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) {
+	username := opts.Username
+	if username == "" {
+		if opts.Stdin == nil {
+			return "", "", fmt.Errorf("cannot prompt for username without stdin")
+		}
+
+		if userFromAuthFile != "" {
+			fmt.Fprintf(opts.Stdout, "Username (%s): ", userFromAuthFile)
+		} else {
+			fmt.Fprint(opts.Stdout, "Username: ")
+		}
+
+		reader := bufio.NewReader(opts.Stdin)
+		username, err = reader.ReadString('\n')
+		if err != nil {
+			return "", "", fmt.Errorf("reading username: %w", err)
+		}
+		// If the user just hit enter, use the displayed user from
+		// the authentication file. This allows doing a lazy
+		// `$ buildah login -p $NEW_PASSWORD` without specifying the
+		// user.
+		if strings.TrimSpace(username) == "" {
+			username = userFromAuthFile
+		}
+	}
+	if password == "" {
+		fmt.Fprint(opts.Stdout, "Password: ")
+		pass, err := passwd.Read(int(os.Stdin.Fd()))
+		if err != nil {
+			return "", "", fmt.Errorf("reading password: %w", err)
+		}
+		password = string(pass)
+		fmt.Fprintln(opts.Stdout)
+	}
+	return strings.TrimSpace(username), password, err
+}
+
+// Logout implements a “log out” command with the provided opts and args
+func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error {
+	if err := CheckAuthFile(opts.AuthFile); err != nil {
+		return err
+	}
+	if err := CheckAuthFile(opts.DockerCompatAuthFile); err != nil {
+		return err
+	}
+	systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, "")
+	if err != nil {
+		return err
+	}
+
+	if opts.All {
+		if len(args) != 0 {
+			return errors.New("--all takes no arguments")
+		}
+		if err := config.RemoveAllAuthentication(systemContext); err != nil {
+			return err
+		}
+		fmt.Fprintln(opts.Stdout, "Removed login credentials for all registries")
+		return nil
+	}
+
+	var key, registry string
+	switch len(args) {
+	case 0:
+		if !opts.AcceptUnspecifiedRegistry {
+			return errors.New("please provide a registry to log out from")
+		}
+		if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
+			return err
+		}
+		registry = key
+		logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
+
+	case 1:
+		key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories)
+		if err != nil {
+			return err
+		}
+
+	default:
+		return errors.New("logout accepts only one registry to log out from")
+	}
+
+	err = config.RemoveAuthentication(systemContext, key)
+	if err == nil {
+		fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key)
+		return nil
+	}
+
+	if errors.Is(err, config.ErrNotLoggedIn) {
+		authConfig, err := config.GetCredentials(systemContext, key)
+		if err != nil {
+			return fmt.Errorf("get credentials: %w", err)
+		}
+
+		authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
+		if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
+			fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key) //nolint:forbidigo
+			return nil
+		}
+		return fmt.Errorf("not logged into %s", key)
+	}
+
+	return fmt.Errorf("logging out of %q: %w", key, err)
+}
+
+// defaultRegistryWhenUnspecified returns the first registry from the search list in registries.conf;
+// it is used by login/logout when no registry argument is specified.
+func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
+	registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
+	if err != nil {
+		return "", fmt.Errorf("getting registry from registries.conf, please specify a registry: %w", err)
+	}
+	if len(registriesFromFile) == 0 {
+		return "", errors.New("no registries found in registries.conf, a registry must be provided")
+	}
+	return registriesFromFile[0], nil
+}
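To illustrate the intended call pattern for this new package, a minimal caller sketch; the registry name is illustrative, and a real tool would pass its own SystemContext and report the error properly:

// Editor's sketch (not part of the vendored file).
package main

import (
	"context"
	"os"

	"github.com/containers/common/pkg/auth"
)

func main() {
	opts := &auth.LoginOptions{
		Stdin:  os.Stdin,  // used to prompt for username/password
		Stdout: os.Stdout, // used for prompts and status messages
	}
	// nil SystemContext: Login builds one via systemContextWithOptions.
	if err := auth.Login(context.Background(), nil, opts, []string{"registry.example.com"}); err != nil {
		os.Exit(1)
	}
}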
diff --git a/vendor/github.com/containers/common/pkg/auth/cli.go b/vendor/github.com/containers/common/pkg/auth/cli.go
new file mode 100644
index 00000000..60e02e51
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/auth/cli.go
@@ -0,0 +1,87 @@
+package auth
+
+import (
+	"io"
+
+	"github.com/containers/common/pkg/completion"
+	"github.com/spf13/pflag"
+)
+
+// LoginOptions represents common flags in login.
+// In addition, the caller should probably provide a --tls-verify flag (that affects the provided
+// *types.SystemContext).
+type LoginOptions struct {
+	// CLI flags managed by the FlagSet returned by GetLoginFlags.
+	// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
+	// other CLI frameworks should set them based on user input.
+	AuthFile             string
+	DockerCompatAuthFile string
+	CertDir              string
+	Password             string
+	Username             string
+	StdinPassword        bool
+	GetLoginSet          bool
+	Verbose              bool // set to true for verbose output
+	AcceptRepositories   bool // set to true to allow namespaces or repositories rather than just registries
+	// Options the caller can set
+	Stdin                     io.Reader // set to os.Stdin
+	Stdout                    io.Writer // set to os.Stdout
+	AcceptUnspecifiedRegistry bool      // set to true to allow login with an unspecified registry
+	NoWriteBack               bool      // set to true to not write the credentials to the authfile/cred helpers
+}
+
+// LogoutOptions represents common flags in logout.
+type LogoutOptions struct {
+	// CLI flags managed by the FlagSet returned by GetLogoutFlags.
+	// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
+	// other CLI frameworks should set them based on user input.
+	AuthFile             string
+	DockerCompatAuthFile string
+	All                  bool
+	AcceptRepositories   bool // set to true to allow namespaces or repositories rather than just registries
+	// Options the caller can set
+	Stdout                    io.Writer // set to os.Stdout
+	AcceptUnspecifiedRegistry bool      // set to true to allow logout with an unspecified registry
+}
+
+// GetLoginFlags defines and returns login flags for containers tools
+func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
+	fs := pflag.FlagSet{}
+	fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead")
+	fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+	fs.StringVarP(&flags.Password, "password", "p", "", "Password for registry")
+	fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry")
+	fs.BoolVar(&flags.StdinPassword, "password-stdin", false, "Take the password from stdin")
+	fs.BoolVar(&flags.GetLoginSet, "get-login", false, "Return the current login user for the registry")
+	fs.BoolVarP(&flags.Verbose, "verbose", "v", false, "Write more detailed information to stdout")
+	return &fs
+}
+
+// GetLoginFlagsCompletions returns the FlagCompletions for the login flags
+func GetLoginFlagsCompletions() completion.FlagCompletions {
+	flagCompletion := completion.FlagCompletions{}
+	flagCompletion["authfile"] = completion.AutocompleteDefault
+	flagCompletion["compat-auth-file"] = completion.AutocompleteDefault
+	flagCompletion["cert-dir"] = completion.AutocompleteDefault
+	flagCompletion["password"] = completion.AutocompleteNone
+	flagCompletion["username"] = completion.AutocompleteNone
+	return flagCompletion
+}
+
+// GetLogoutFlags defines and returns logout flags for containers tools
+func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
+	fs := pflag.FlagSet{}
+	fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead")
+	fs.BoolVarP(&flags.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file")
+	return &fs
+}
+
+// GetLogoutFlagsCompletions returns the FlagCompletions for the logout flags
+func GetLogoutFlagsCompletions() completion.FlagCompletions {
+	flagCompletion := completion.FlagCompletions{}
+	flagCompletion["authfile"] = completion.AutocompleteDefault
+	flagCompletion["compat-auth-file"] = completion.AutocompleteDefault
+	return flagCompletion
+}
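A sketch of how a cobra-based CLI might consume these helpers together with the completion package introduced in the next file; the command wiring is illustrative:

// Editor's sketch (not part of the vendored file).
package main

import (
	"github.com/containers/common/pkg/auth"
	"github.com/containers/common/pkg/completion"
	"github.com/spf13/cobra"
)

func newLoginCommand() *cobra.Command {
	opts := auth.LoginOptions{}
	cmd := &cobra.Command{Use: "login [REGISTRY]"}
	cmd.Flags().AddFlagSet(auth.GetLoginFlags(&opts))
	// Register the per-flag shell completion functions.
	completion.CompleteCommandFlags(cmd, auth.GetLoginFlagsCompletions())
	return cmd
}

func main() { _ = newLoginCommand().Execute() }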
diff --git a/vendor/github.com/containers/common/pkg/completion/completion.go b/vendor/github.com/containers/common/pkg/completion/completion.go
new file mode 100644
index 00000000..908d568f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/completion/completion.go
@@ -0,0 +1,155 @@
+package completion
+
+import (
+	"bufio"
+	"os"
+	"strings"
+	"unicode"
+
+	"github.com/containers/common/pkg/capabilities"
+	"github.com/spf13/cobra"
+)
+
+// FlagCompletions - hold flag completion functions to be applied later with CompleteCommandFlags()
+type FlagCompletions map[string]func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective)
+
+// CompleteCommandFlags - add completion functions for each flag name in FlagCompletions.
+func CompleteCommandFlags(cmd *cobra.Command, flags FlagCompletions) {
+	for flagName, completionFunc := range flags {
+		_ = cmd.RegisterFlagCompletionFunc(flagName, completionFunc)
+	}
+}
+
+/* Autocomplete Functions for cobra ValidArgsFunction */
+
+// AutocompleteNone - Block the default shell completion (no paths)
+func AutocompleteNone(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	return nil, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteDefault - Use the default shell completion;
+// allows path completion.
+func AutocompleteDefault(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	return nil, cobra.ShellCompDirectiveDefault
+}
+
+// AutocompleteCapabilities - Autocomplete linux capabilities options.
+// Used by --cap-add and --cap-drop.
+func AutocompleteCapabilities(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	caps := capabilities.AllCapabilities()
+
+	// convertCase will convert a string to lowercase only if the user input is lowercase
+	convertCase := func(s string) string { return s }
+	if len(toComplete) > 0 && unicode.IsLower(rune(toComplete[0])) {
+		convertCase = strings.ToLower
+	}
+
+	// offset is used to trim "CAP_" if the user doesn't type CA... or ca...
+	offset := 0
+	if !strings.HasPrefix(toComplete, convertCase("CA")) {
+		// setting the offset to 4 is safe since each cap starts with CAP_
+		offset = 4
+	}
+
+	completions := make([]string, 0, len(caps))
+	for _, cap := range caps {
+		completions = append(completions, convertCase(cap)[offset:])
+	}
+
+	// add ALL here, which is also a valid argument
+	completions = append(completions, convertCase(capabilities.All))
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// autocompleteSubIDName - autocomplete the names in /etc/subuid or /etc/subgid
+func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective) {
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, cobra.ShellCompDirectiveError
+	}
+	defer file.Close()
+
+	var names []string
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		name := strings.SplitN(scanner.Text(), ":", 2)[0]
+		names = append(names, name)
+	}
+	if err = scanner.Err(); err != nil {
+		return nil, cobra.ShellCompDirectiveError
+	}
+
+	return names, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteSubgidName - Autocomplete subgidname based on the names in the /etc/subgid file.
+func AutocompleteSubgidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	return autocompleteSubIDName("/etc/subgid")
+}
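For context, the /etc/subuid format parsed by autocompleteSubIDName is one name:start:count entry per line, and only the name is offered as a completion. A small standalone illustration with made-up entries:

// Editor's sketch (not part of the vendored file).
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := "alice:100000:65536\nbob:165536:65536\n" // illustrative file content
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		fmt.Println(strings.SplitN(scanner.Text(), ":", 2)[0]) // alice, bob
	}
}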
+
+// AutocompleteSubuidName - Autocomplete subuidname based on the names in the /etc/subuid file.
+func AutocompleteSubuidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	return autocompleteSubIDName("/etc/subuid")
+}
+
+// AutocompletePlatform - Autocomplete platforms supported by container engines
+func AutocompletePlatform(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	completions := []string{
+		"linux/386",
+		"linux/amd64",
+		"linux/arm",
+		"linux/arm64",
+		"linux/ppc64",
+		"linux/ppc64le",
+		"linux/mips",
+		"linux/mipsle",
+		"linux/mips64",
+		"linux/mips64le",
+		"linux/riscv64",
+		"linux/s390x",
+		"windows/386",
+		"windows/amd64",
+		"windows/arm",
+	}
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteArch - Autocomplete architectures supported by container engines
+func AutocompleteArch(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	completions := []string{
+		"386",
+		"amd64",
+		"arm",
+		"arm64",
+		"ppc64",
+		"ppc64le",
+		"mips",
+		"mipsle",
+		"mips64",
+		"mips64le",
+		"riscv64",
+		"s390x",
+	}
+
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteOS - Autocomplete OS supported by container engines
+func AutocompleteOS(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	completions := []string{"linux", "windows"}
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteJSONFormat - Autocomplete the format flag option.
+// -> "json"
+func AutocompleteJSONFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+	return []string{"json"}, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteOneArg - Autocomplete one arg
+func AutocompleteOneArg(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+	if len(args) == 1 {
+		return nil, cobra.ShellCompDirectiveDefault
+	}
+	return nil, cobra.ShellCompDirectiveNoFileComp
+}
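In the config.go hunk that follows, many containers.conf []string fields become attributedstring.Slice, and the converted code reads and writes them through Get()/Set(). A minimal illustration of that accessor pattern (editor's sketch; the type itself lives in vendor/github.com/containers/common/internal/attributedstring):

// Editor's sketch (not part of the vendored file).
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	env := cfg.Containers.Env.Get()      // read: plain []string view
	env = append(env, "TMPDIR=/var/tmp") // illustrative extra entry
	cfg.Containers.Env.Set(env)          // write back through the setter
	fmt.Println(cfg.Containers.Env.Get())
}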
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 8bdccd4e..75b917f0 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	"github.com/BurntSushi/toml"
+	"github.com/containers/common/internal/attributedstring"
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/capabilities"
 	"github.com/containers/common/pkg/util"
@@ -30,24 +31,6 @@ const (
 	bindirPrefix = "$BINDIR"
 )
 
-// RuntimeStateStore is a constant indicating which state store implementation
-// should be used by engine
-type RuntimeStateStore int
-
-const (
-	// InvalidStateStore is an invalid state store
-	InvalidStateStore RuntimeStateStore = iota
-	// InMemoryStateStore is an in-memory state that will not persist data
-	// on containers and pods between engine instances or after system
-	// reboot
-	InMemoryStateStore RuntimeStateStore = iota
-	// SQLiteStateStore is a state backed by a SQLite database
-	// It is presently disabled
-	SQLiteStateStore RuntimeStateStore = iota
-	// BoltDBStateStore is a state backed by a BoltDB database
-	BoltDBStateStore RuntimeStateStore = iota
-)
-
 var validImageVolumeModes = []string{_typeBind, "tmpfs", "ignore"}
 
 // ProxyEnv is a list of Proxy Environment variables
@@ -86,17 +69,17 @@ type Config struct {
 // containers global options for containers tools
 type ContainersConfig struct {
 	// Devices to add to all containers
-	Devices []string `toml:"devices,omitempty"`
+	Devices attributedstring.Slice `toml:"devices,omitempty"`
 
 	// Volumes to add to all containers
-	Volumes []string `toml:"volumes,omitempty"`
+	Volumes attributedstring.Slice `toml:"volumes,omitempty"`
 
 	// ApparmorProfile is the apparmor profile name which is used as the
 	// default for the runtime.
 	ApparmorProfile string `toml:"apparmor_profile,omitempty"`
 
 	// Annotation to add to all containers
-	Annotations []string `toml:"annotations,omitempty"`
+	Annotations attributedstring.Slice `toml:"annotations,omitempty"`
 
 	// BaseHostsFile is the path to a hosts file, the entries from this file
 	// are added to the containers hosts file. As special value "image" is
@@ -113,28 +96,28 @@ type ContainersConfig struct {
 
 	// CgroupConf entries specifies a list of cgroup files to write to and their values. For example
 	// "memory.high=1073741824" sets the memory.high limit to 1GB.
-	CgroupConf []string `toml:"cgroup_conf,omitempty"`
+	CgroupConf attributedstring.Slice `toml:"cgroup_conf,omitempty"`
 
 	// Capabilities to add to all containers.
-	DefaultCapabilities []string `toml:"default_capabilities,omitempty"`
+	DefaultCapabilities attributedstring.Slice `toml:"default_capabilities,omitempty"`
 
 	// Sysctls to add to all containers.
-	DefaultSysctls []string `toml:"default_sysctls,omitempty"`
+	DefaultSysctls attributedstring.Slice `toml:"default_sysctls,omitempty"`
 
 	// DefaultUlimits specifies the default ulimits to apply to containers
-	DefaultUlimits []string `toml:"default_ulimits,omitempty"`
+	DefaultUlimits attributedstring.Slice `toml:"default_ulimits,omitempty"`
 
 	// DefaultMountsFile is the path to the default mounts file for testing
 	DefaultMountsFile string `toml:"-"`
 
 	// DNSServers set default DNS servers.
-	DNSServers []string `toml:"dns_servers,omitempty"`
+	DNSServers attributedstring.Slice `toml:"dns_servers,omitempty"`
 
 	// DNSOptions set default DNS options.
-	DNSOptions []string `toml:"dns_options,omitempty"`
+	DNSOptions attributedstring.Slice `toml:"dns_options,omitempty"`
 
 	// DNSSearches set default DNS search domains.
-	DNSSearches []string `toml:"dns_searches,omitempty"`
+	DNSSearches attributedstring.Slice `toml:"dns_searches,omitempty"`
 
 	// EnableKeyring tells the container engines whether to create
 	// a kernel keyring for use within the container
@@ -151,7 +134,7 @@ type ContainersConfig struct {
 	EnableLabeledUsers bool `toml:"label_users,omitempty"`
 
 	// Env is the environment variable list for container process.
-	Env []string `toml:"env,omitempty"`
+	Env attributedstring.Slice `toml:"env,omitempty"`
 
 	// EnvHost Pass all host environment variables into the container.
 	EnvHost bool `toml:"env_host,omitempty"`
@@ -167,6 +150,8 @@ type ContainersConfig struct {
 	Init bool `toml:"init,omitempty"`
 
 	// InitPath is the path for init to run if the Init bool is enabled
+	//
+	// Deprecated: Do not use this field directly; use conf.FindInitBinary() instead.
 	InitPath string `toml:"init_path,omitempty"`
 
 	// IPCNS way to create a ipc namespace for the container
@@ -187,7 +172,7 @@ type ContainersConfig struct {
 	LogTag string `toml:"log_tag,omitempty"`
 
 	// Mount to add to all containers
-	Mounts []string `toml:"mounts,omitempty"`
+	Mounts attributedstring.Slice `toml:"mounts,omitempty"`
 
 	// NetNS indicates how to create a network namespace for the container
 	NetNS string `toml:"netns,omitempty"`
@@ -213,6 +198,18 @@ type ContainersConfig struct {
 	// performance implications.
 	PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"`
 
+	// Give extended privileges to all containers. A privileged container
+	// turns off the security features that isolate the container from the
+	// host. Dropped Capabilities, limited devices, read-only mount points,
+	// Apparmor/SELinux separation, and Seccomp filters are all disabled.
+	// Due to the disabled security features, the privileged field should
+	// almost never be set, as containers can easily break out of
+	// confinement.
+	//
+	// Containers running in a user namespace (e.g., rootless containers)
+	// cannot have more privileges than the user that launched them.
+	Privileged bool `toml:"privileged,omitempty"`
+
 	// ReadOnly causes engine to run all containers with root file system mounted read-only
 	ReadOnly bool `toml:"read_only,omitempty"`
 
@@ -254,15 +251,15 @@ type EngineConfig struct {
 
 	// ConmonEnvVars are environment variables to pass to the Conmon binary
 	// when it is launched.
-	ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"`
+	ConmonEnvVars attributedstring.Slice `toml:"conmon_env_vars,omitempty"`
 
 	// ConmonPath is the path to the Conmon binary used for managing containers.
 	// The first path pointing to a valid file will be used.
-	ConmonPath []string `toml:"conmon_path,omitempty"`
+	ConmonPath attributedstring.Slice `toml:"conmon_path,omitempty"`
 
 	// ConmonRsPath is the path to the Conmon-rs binary used for managing containers.
 	// The first path pointing to a valid file will be used.
-	ConmonRsPath []string `toml:"conmonrs_path,omitempty"`
+	ConmonRsPath attributedstring.Slice `toml:"conmonrs_path,omitempty"`
 
 	// CompatAPIEnforceDockerHub enforces using docker.io for completing
 	// short names in Podman's compatibility REST API. Note that this will
@@ -274,7 +271,7 @@ type EngineConfig struct {
 	// compose command. The first found provider is used for execution.
 	// Can be an absolute and relative path or a (file) name. Make sure to
 	// expand the return items via `os.ExpandEnv`.
-	ComposeProviders []string `toml:"compose_providers,omitempty"`
+	ComposeProviders attributedstring.Slice `toml:"compose_providers,omitempty"`
 
 	// ComposeWarningLogs emits logs on each invocation of the compose
 	// command indicating that an external compose provider is being
@@ -297,7 +294,7 @@ type EngineConfig struct {
 	EnablePortReservation bool `toml:"enable_port_reservation,omitempty"`
 
 	// Environment variables to be used when running the container engine (e.g., Podman, Buildah). For example "http_proxy=internal.proxy.company.com"
-	Env []string `toml:"env,omitempty"`
+	Env attributedstring.Slice `toml:"env,omitempty"`
 
 	// EventsLogFilePath is where the events log is stored.
 	EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
@@ -319,12 +316,12 @@ type EngineConfig struct {
 
 	// HelperBinariesDir is a list of directories which are used to search for
 	// helper binaries.
-	HelperBinariesDir []string `toml:"helper_binaries_dir"`
+	HelperBinariesDir attributedstring.Slice `toml:"helper_binaries_dir,omitempty"`
 
 	// configuration files. When the same filename is present in
 	// multiple directories, the file in the directory listed last in
 	// this slice takes precedence.
-	HooksDir []string `toml:"hooks_dir,omitempty"`
+	HooksDir attributedstring.Slice `toml:"hooks_dir,omitempty"`
 
 	// ImageBuildFormat (DEPRECATED) indicates the default image format to
 	// building container images. Should use ImageDefaultFormat
@@ -357,6 +354,8 @@ type EngineConfig struct {
 	InfraImage string `toml:"infra_image,omitempty"`
 
 	// InitPath is the path to the container-init binary.
+	//
+	// Deprecated: Do not use this field directly; use conf.FindInitBinary() instead.
 	InitPath string `toml:"init_path,omitempty"`
 
 	// KubeGenerateType sets the Kubernetes kind/specification to generate by default
@@ -389,7 +388,7 @@ type EngineConfig struct {
 
 	// NetworkCmdOptions is the default options to pass to the slirp4netns binary.
 	// For example "allow_host_loopback=true"
-	NetworkCmdOptions []string `toml:"network_cmd_options,omitempty"`
+	NetworkCmdOptions attributedstring.Slice `toml:"network_cmd_options,omitempty"`
 
 	// NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
 	NoPivotRoot bool `toml:"no_pivot_root,omitempty"`
@@ -429,7 +428,7 @@ type EngineConfig struct {
 	ActiveService string `toml:"active_service,omitempty"`
 
 	// Add existing instances with requested compression algorithms to manifest list
-	AddCompression []string `toml:"add_compression,omitempty"`
+	AddCompression attributedstring.Slice `toml:"add_compression,omitempty"`
 
 	// ServiceDestinations mapped by service Names
 	ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"`
@@ -441,19 +440,19 @@ type EngineConfig struct {
 	// The first path pointing to a valid file will be used This is used only
 	// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
 	// backward compatible with older versions of Podman.
-	RuntimePath []string `toml:"runtime_path,omitempty"`
+	RuntimePath attributedstring.Slice `toml:"runtime_path,omitempty"`
 
 	// RuntimeSupportsJSON is the list of the OCI runtimes that support
 	// --format=json.
-	RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"`
+	RuntimeSupportsJSON attributedstring.Slice `toml:"runtime_supports_json,omitempty"`
 
 	// RuntimeSupportsNoCgroups is a list of OCI runtimes that support
 	// running containers without CGroups.
-	RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroup,omitempty"`
+	RuntimeSupportsNoCgroups attributedstring.Slice `toml:"runtime_supports_nocgroup,omitempty"`
 
 	// RuntimeSupportsKVM is a list of OCI runtimes that support
 	// KVM separation for containers.
-	RuntimeSupportsKVM []string `toml:"runtime_supports_kvm,omitempty"`
+	RuntimeSupportsKVM attributedstring.Slice `toml:"runtime_supports_kvm,omitempty"`
 
 	// SetOptions contains a subset of config options. It's used to indicate if
 	// a given option has either been set by the user or by the parsed
@@ -471,13 +470,6 @@ type EngineConfig struct {
 	// readiness using the SD_NOTIFY mechanism.
 	SDNotify bool `toml:"-"`
 
-	// StateType is the type of the backing state store. Avoid using multiple
-	// values for this with the same containers/storage configuration on the
-	// same system. Different state types do not interact, and each will see a
-	// separate set of containers, which may cause conflicts in
-	// containers/storage. As such this is not exposed via the config file.
-	StateType RuntimeStateStore `toml:"-"`
-
 	// ServiceTimeout is the number of seconds to wait without a connection
 	// before the `podman system service` times out and exits
 	ServiceTimeout uint `toml:"service_timeout,omitempty,omitzero"`
@@ -561,24 +553,6 @@ type SetOptions struct {
 	// backwards compatibility with older versions of libpod for which we must
 	// query the database configuration. Not included in the on-disk config.
 	StorageConfigGraphDriverNameSet bool `toml:"-"`
-
-	// StaticDirSet indicates if the StaticDir has been explicitly set by the
It's required to guarantee backwards compatibility - // with older versions of libpod for which we must query the database - // configuration. Not included in the on-disk config. - StaticDirSet bool `toml:"-"` - - // VolumePathSet indicates if the VolumePath has been explicitly set by the - // config or by the user. It's required to guarantee backwards compatibility - // with older versions of libpod for which we must query the database - // configuration. Not included in the on-disk config. - VolumePathSet bool `toml:"-"` - - // TmpDirSet indicates if the TmpDir has been explicitly set by the config - // or by the user. It's required to guarantee backwards compatibility with - // older versions of libpod for which we must query the database - // configuration. Not included in the on-disk config. - TmpDirSet bool `toml:"-"` } // NetworkConfig represents the "network" TOML config table @@ -588,10 +562,10 @@ type NetworkConfig struct { NetworkBackend string `toml:"network_backend,omitempty"` // CNIPluginDirs is where CNI plugin binaries are stored. - CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"` + CNIPluginDirs attributedstring.Slice `toml:"cni_plugin_dirs,omitempty"` // NetavarkPluginDirs is a list of directories which contain netavark plugins. - NetavarkPluginDirs []string `toml:"netavark_plugin_dirs,omitempty"` + NetavarkPluginDirs attributedstring.Slice `toml:"netavark_plugin_dirs,omitempty"` // DefaultNetwork is the network name of the default network // to attach pods to. @@ -624,7 +598,7 @@ type NetworkConfig struct { // PastaOptions contains a default list of pasta(1) options that should // be used when running pasta. - PastaOptions []string `toml:"pasta_options,omitempty"` + PastaOptions attributedstring.Slice `toml:"pasta_options,omitempty"` } type SubnetPool struct { @@ -675,12 +649,12 @@ type MachineConfig struct { // User to use for rootless podman when init-ing a podman machine VM User string `toml:"user,omitempty"` // Volumes are host directories mounted into the VM by default. - Volumes []string `toml:"volumes"` + Volumes attributedstring.Slice `toml:"volumes,omitempty"` // Provider is the virtualization provider used to run podman-machine VM Provider string `toml:"provider,omitempty"` } -// FarmConfig represents the "farm" TOML config tabls +// FarmConfig represents the "farm" TOML config tables type FarmConfig struct { // Default is the default farm to be used when farming out builds Default string `toml:"default,omitempty"` @@ -740,12 +714,15 @@ func (c *Config) CheckCgroupsAndAdjustConfig() { } func (c *Config) addCAPPrefix() { - for i, val := range c.Containers.DefaultCapabilities { + caps := c.Containers.DefaultCapabilities.Get() + newCaps := make([]string, 0, len(caps)) + for _, val := range caps { if !strings.HasPrefix(strings.ToLower(val), "cap_") { val = "CAP_" + strings.ToUpper(val) } - c.Containers.DefaultCapabilities[i] = val + newCaps = append(newCaps, val) } + c.Containers.DefaultCapabilities.Set(newCaps) } // Validate is the main entry point for library configuration validation. @@ -880,7 +857,7 @@ func (c *NetworkConfig) Validate() error { // to first (version) matching conmon binary. If none is found, we try // to do a path lookup of "conmon".
func (c *Config) FindConmon() (string, error) { - return findConmonPath(c.Engine.ConmonPath, "conmon") + return findConmonPath(c.Engine.ConmonPath.Get(), "conmon") } func findConmonPath(paths []string, binaryName string) (string, error) { @@ -910,7 +887,7 @@ func findConmonPath(paths []string, binaryName string) (string, error) { // to first (version) matching conmonrs binary. If none is found, we try // to do a path lookup of "conmonrs". func (c *Config) FindConmonRs() (string, error) { - return findConmonPath(c.Engine.ConmonRsPath, "conmonrs") + return findConmonPath(c.Engine.ConmonRsPath.Get(), "conmonrs") } // GetDefaultEnv returns the environment variables for the container. @@ -934,7 +911,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { } } } - return append(env, c.Containers.Env...) + return append(env, c.Containers.Env.Get()...) } // Capabilities returns the capabilities parses the Add and Drop capability @@ -947,7 +924,7 @@ func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []s return true } - defaultCapabilities := c.Containers.DefaultCapabilities + defaultCapabilities := c.Containers.DefaultCapabilities.Get() if userNotRoot(user) { defaultCapabilities = []string{} } @@ -1100,34 +1077,6 @@ func Reload() (*Config, error) { return New(&Options{SetDefault: true}) } -func (c *Config) ActiveDestination() (uri, identity string, machine bool, err error) { - if uri, found := os.LookupEnv("CONTAINER_HOST"); found { - if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found { - identity = v - } - return uri, identity, false, nil - } - connEnv := os.Getenv("CONTAINER_CONNECTION") - switch { - case connEnv != "": - d, found := c.Engine.ServiceDestinations[connEnv] - if !found { - return "", "", false, fmt.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) - } - return d.URI, d.Identity, d.IsMachine, nil - - case c.Engine.ActiveService != "": - d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService] - if !found { - return "", "", false, fmt.Errorf("%q service destination not found", c.Engine.ActiveService) - } - return d.URI, d.Identity, d.IsMachine, nil - case c.Engine.RemoteURI != "": - return c.Engine.RemoteURI, c.Engine.RemoteIdentity, false, nil - } - return "", "", false, errors.New("no service destination configured") -} - var ( bindirFailed = false bindirCached = "" @@ -1156,7 +1105,7 @@ func findBindir() string { // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { - dirList := c.Engine.HelperBinariesDir + dirList := c.Engine.HelperBinariesDir.Get() bindirPath := "" bindirSearched := false @@ -1197,7 +1146,7 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) return exec.LookPath(name) } configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." - if len(c.Engine.HelperBinariesDir) == 0 { + if len(c.Engine.HelperBinariesDir.Get()) == 0 { return "", fmt.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } return "", fmt.Errorf("could not find %q in one of %v.
%s", name, c.Engine.HelperBinariesDir, configHint) @@ -1224,7 +1173,7 @@ func (c *Config) ImageCopyTmpDir() (string, error) { // setupEnv sets the environment variables for the engine func (c *Config) setupEnv() error { - for _, env := range c.Engine.Env { + for _, env := range c.Engine.Env.Get() { splitEnv := strings.SplitN(env, "=", 2) if len(splitEnv) != 2 { logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) @@ -1282,3 +1231,20 @@ func ValidateImageVolumeMode(mode string) error { return fmt.Errorf("invalid image volume mode %q required value: %s", mode, strings.Join(validImageVolumeModes, ", ")) } + +// FindInitBinary will return the path to the init binary (catatonit) +func (c *Config) FindInitBinary() (string, error) { + // Sigh, for some reason we ended up with two InitPath field in containers.conf and + // both are used in podman so we have to keep supporting both to prevent regressions. + if c.Containers.InitPath != "" { + return c.Containers.InitPath, nil + } + if c.Engine.InitPath != "" { + return c.Engine.InitPath, nil + } + // keep old default working to guarantee backwards comapt + if _, err := os.Stat(DefaultInitPath); err == nil { + return DefaultInitPath, nil + } + return c.FindHelperBinary(defaultInitName, true) +} diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index eb83733d..1b40e2ba 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -32,6 +32,8 @@ func ifRootlessConfigPath() (string, error) { } var defaultHelperBinariesDir = []string{ + // Relative to the binary directory + "$BINDIR/../libexec/podman", // Homebrew install paths "/usr/local/opt/podman/libexec/podman", "/opt/homebrew/opt/podman/libexec/podman", @@ -42,6 +44,4 @@ var defaultHelperBinariesDir = []string{ "/usr/local/lib/podman", "/usr/libexec/podman", "/usr/lib/podman", - // Relative to the binary directory - "$BINDIR/../libexec/podman", } diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index 10d40ddd..dae3ea0d 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -10,8 +10,8 @@ import ( "regexp" "strings" - "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" units "github.com/docker/go-units" + "tags.cncf.io/container-device-interface/pkg/parser" ) func (c *EngineConfig) validatePaths() error { @@ -31,7 +31,7 @@ func (c *EngineConfig) validatePaths() error { } func (c *ContainersConfig) validateDevices() error { - for _, d := range c.Devices { + for _, d := range c.Devices.Get() { if parser.IsQualifiedName(d) { continue } @@ -44,7 +44,7 @@ func (c *ContainersConfig) validateDevices() error { } func (c *ContainersConfig) validateUlimits() error { - for _, u := range c.DefaultUlimits { + for _, u := range c.DefaultUlimits.Get() { ul, err := units.ParseUlimit(u) if err != nil { return fmt.Errorf("unrecognized ulimit %s: %w", u, err) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 54f20d71..8c532f07 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -149,6 +149,9 
@@ default_sysctls = [ #init = false # Container init binary, if init=true, this is the init binary to be used for containers. +# If this option is not set, catatonit is searched in the directories listed under +# the helper_binaries_dir option. It is recommended to just install catatonit +# there instead of configuring this option here. # #init_path = "/usr/libexec/podman/catatonit" @@ -237,6 +240,18 @@ default_sysctls = [ # #prepare_volume_on_create = false +# Give extended privileges to all containers. A privileged container turns off +# the security features that isolate the container from the host. Dropped +# Capabilities, limited devices, read-only mount points, Apparmor/SELinux +# separation, and Seccomp filters are all disabled. Due to the disabled +# security features, the privileged field should almost never be set as +# containers can easily break out of confinement. +# +# Containers running in a user namespace (e.g., rootless containers) cannot +# have more privileges than the user that launched them. +# +#privileged = false + # Run all containers with root file system mounted read-only # # read_only = false @@ -442,10 +457,14 @@ default_sysctls = [ # short-name aliases defined in containers-registries.conf(5). #compat_api_enforce_docker_hub = true -# The database backend of Podman. Supported values are "boltdb" (default) and -# "sqlite". Please run `podman-system-reset` prior to changing the database +# The database backend of Podman. Supported values are "" (default), "boltdb" +# and "sqlite". An empty value means it will check whether a boltdb already +# exists and use it if it does; otherwise it will use sqlite as the default +# (e.g. new installs). This allows for backwards compatibility with older versions. +# Please run `podman-system-reset` prior to changing the database # backend of an existing deployment, to make sure Podman can operate correctly. -#database_backend="boltdb" +# +#database_backend = "" # Specify the keys sequence used to detach a container. # Format is a single character [a-Z] or a comma separated sequence of @@ -669,8 +688,8 @@ default_sysctls = [ # [engine.service_destinations.production] # URI to access the Podman service # Examples: -# rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootful "unix://run/podman/podman.sock (Default) +# rootless "unix:///run/user/$UID/podman/podman.sock" (Default) +# rootful "unix:///run/podman/podman.sock" (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock # remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock # diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 13b7918d..f471e307 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -29,10 +29,14 @@ # #base_hosts_file = "" -# The database backend of Podman. Supported values are "boltdb" (default) and -# "sqlite". Please run `podman-system-reset` prior to changing the database +# The database backend of Podman. Supported values are "" (default), "boltdb" +# and "sqlite". An empty value means it will check whether a boltdb already +# exists and use it if it does; otherwise it will use sqlite as the default +# (e.g. new installs). This allows for backwards compatibility with older versions.
+# Please run `podman-system-reset` prior to changing the database # backend of an existing deployment, to make sure Podman can operate correctly. -#database_backend="boltdb" +# +#database_backend = "" # List of default capabilities for containers. If it is empty or commented out, # the default capabilities defined in the container engine will be added. @@ -129,6 +133,9 @@ default_sysctls = [ #init = false # Container init binary, if init=true, this is the init binary to be used for containers. +# If this option is not set, catatonit is searched in the directories listed under +# the helper_binaries_dir option. It is recommended to just install catatonit +# there instead of configuring this option here. # #init_path = "/usr/local/libexec/podman/catatonit" @@ -200,6 +207,18 @@ default_sysctls = [ # #prepare_volume_on_create = false +# Give extended privileges to all containers. A privileged container turns off +# the security features that isolate the container from the host. Dropped +# Capabilities, limited devices, read-only mount points, Apparmor/SELinux +# separation, and Seccomp filters are all disabled. Due to the disabled +# security features, the privileged field should almost never be set as +# containers can easily break out of confinement. +# +# Containers running in a user namespace (e.g., rootless containers) cannot +# have more privileges than the user that launched them. +# +#privileged = false + # Set timezone in container. Takes IANA timezones as well as "local", # which sets the timezone in the container to match the host machine. # @@ -541,8 +560,8 @@ default_sysctls = [ # [service_destinations.production] # URI to access the Podman service # Examples: -# rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootful "unix://run/podman/podman.sock (Default) +# rootless "unix:///run/user/$UID/podman/podman.sock" (Default) +# rootful "unix:///run/podman/podman.sock" (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock # remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock # diff --git a/vendor/github.com/containers/common/pkg/config/db_backend.go b/vendor/github.com/containers/common/pkg/config/db_backend.go index 8fd78165..a2fda8e0 100644 --- a/vendor/github.com/containers/common/pkg/config/db_backend.go +++ b/vendor/github.com/containers/common/pkg/config/db_backend.go @@ -13,6 +13,12 @@ const ( // SQLite backend. DBBackendSQLite + // DBBackendDefault describes that no explicit backend has been set. + // It should default to sqlite unless there is already an existing boltdb; + // this allows for backwards compatibility on upgrades. The actual detection + // logic must live in podman as only podman knows where to look for the file. + DBBackendDefault + stringBoltDB = "boltdb" stringSQLite = "sqlite" ) @@ -24,6 +30,8 @@ func (d DBBackend) String() string { return stringBoltDB case DBBackendSQLite: return stringSQLite + case DBBackendDefault: + return "" default: return fmt.Sprintf("unsupported database backend: %d", d) } @@ -32,7 +40,7 @@ func (d DBBackend) String() string { // Validate returns whether the DBBackend is supported.
func (d DBBackend) Validate() error { switch d { - case DBBackendBoltDB, DBBackendSQLite: + case DBBackendBoltDB, DBBackendSQLite, DBBackendDefault: return nil default: return fmt.Errorf("unsupported database backend: %d", d) @@ -49,12 +57,9 @@ func ParseDBBackend(raw string) (DBBackend, error) { return DBBackendBoltDB, nil case stringSQLite: return DBBackendSQLite, nil + case "": + return DBBackendDefault, nil default: return DBBackendUnsupported, fmt.Errorf("unsupported database backend: %q", raw) } } - -// DBBackend returns the configured database backend. -func (c *Config) DBBackend() (DBBackend, error) { - return ParseDBBackend(c.Engine.DBBackend) -} diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 942c0406..ea2ede1c 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -6,8 +6,10 @@ import ( "net" "os" "path/filepath" + "runtime" "strings" + "github.com/containers/common/internal/attributedstring" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" @@ -29,9 +31,36 @@ const ( // _defaultImageVolumeMode is a mode to handle built-in image volumes. _defaultImageVolumeMode = _typeBind + + // defaultInitName is the default name of the init binary + defaultInitName = "catatonit" ) var ( + DefaultMaskedPaths = []string{ + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/sched_debug", + "/proc/scsi", + "/proc/timer_list", + "/proc/timer_stats", + "/sys/dev/block", + "/sys/devices/virtual/powercap", + "/sys/firmware", + "/sys/fs/selinux", + } + + DefaultReadOnlyPaths = []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + } + // DefaultInfraImage is the default image to run as infrastructure containers in pods. DefaultInfraImage = "" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks. @@ -97,6 +126,8 @@ var ( "/usr/libexec/docker/cli-plugins/docker-compose", "podman-compose", } + + defaultContainerEnv = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"} ) // nolint:unparam @@ -119,8 +150,6 @@ const ( CgroupfsCgroupsManager = "cgroupfs" // DefaultApparmorProfile specifies the default apparmor profile for the container. DefaultApparmorProfile = apparmor.Profile - // DefaultDBBackend specifies the default database backend to be used by Podman. - DefaultDBBackend = DBBackendBoltDB // DefaultHostsFile is the default path to the hosts file. DefaultHostsFile = "/etc/hosts" // SystemdCgroupsManager represents systemd native cgroup manager. 
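For orientation amid the pervasive []string -> attributedstring.Slice conversions in this patch: the new type is only ever read through Get and written through Set or NewSlice, which is what later allows TOML attributes to travel alongside the values. A minimal standalone sketch of that accessor pattern (simplified; the real containers/common type also carries TOML attribute state, which is omitted here):

package attributedstring

// Slice is a simplified stand-in for the containers/common type: a string
// slice that callers read and write only through accessors.
type Slice struct {
	values []string
}

// NewSlice seeds a Slice from a plain []string.
func NewSlice(values []string) Slice {
	return Slice{values: values}
}

// Get returns the current values.
func (s *Slice) Get() []string {
	return s.values
}

// Set replaces the current values.
func (s *Slice) Set(values []string) {
	s.values = values
}

The addCAPPrefix rewrite earlier in this file follows directly from this shape: read with Get, build a fresh slice, and write it back with Set rather than mutating elements in place.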
@@ -188,41 +217,39 @@ func defaultConfig() (*Config, error) { return &Config{ Containers: ContainersConfig{ - Annotations: []string{}, + Annotations: attributedstring.Slice{}, ApparmorProfile: DefaultApparmorProfile, BaseHostsFile: "", CgroupNS: cgroupNS, Cgroups: getDefaultCgroupsMode(), - DNSOptions: []string{}, - DNSSearches: []string{}, - DNSServers: []string{}, - DefaultCapabilities: DefaultCapabilities, - DefaultSysctls: []string{}, - DefaultUlimits: getDefaultProcessLimits(), - Devices: []string{}, + DNSOptions: attributedstring.Slice{}, + DNSSearches: attributedstring.Slice{}, + DNSServers: attributedstring.Slice{}, + DefaultCapabilities: attributedstring.NewSlice(DefaultCapabilities), + DefaultSysctls: attributedstring.Slice{}, + DefaultUlimits: attributedstring.NewSlice(getDefaultProcessLimits()), + Devices: attributedstring.Slice{}, EnableKeyring: true, EnableLabeling: selinuxEnabled(), - Env: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - EnvHost: false, - HTTPProxy: true, - IPCNS: "shareable", - Init: false, - InitPath: "", - LogDriver: defaultLogDriver(), - LogSizeMax: DefaultLogSizeMax, - Mounts: []string{}, - NetNS: "private", - NoHosts: false, - PidNS: "private", - PidsLimit: DefaultPidsLimit, - ShmSize: DefaultShmSize, - TZ: "", - UTSNS: "private", - Umask: "0022", - UserNSSize: DefaultUserNSSize, // Deprecated - Volumes: []string{}, + Env: attributedstring.NewSlice(defaultContainerEnv), + EnvHost: false, + HTTPProxy: true, + IPCNS: "shareable", + Init: false, + InitPath: "", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + Mounts: attributedstring.Slice{}, + NetNS: "private", + NoHosts: false, + PidNS: "private", + PidsLimit: DefaultPidsLimit, + ShmSize: DefaultShmSize, + TZ: "", + UTSNS: "private", + Umask: "0022", + UserNSSize: DefaultUserNSSize, // Deprecated + Volumes: attributedstring.Slice{}, }, Network: NetworkConfig{ DefaultNetwork: "podman", @@ -230,8 +257,8 @@ func defaultConfig() (*Config, error) { DefaultSubnetPools: DefaultSubnetPools, DefaultRootlessNetworkCmd: "slirp4netns", DNSBindPort: 0, - CNIPluginDirs: DefaultCNIPluginDirs, - NetavarkPluginDirs: DefaultNetavarkPluginDirs, + CNIPluginDirs: attributedstring.NewSlice(DefaultCNIPluginDirs), + NetavarkPluginDirs: attributedstring.NewSlice(DefaultNetavarkPluginDirs), }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -250,13 +277,17 @@ func defaultSecretConfig() SecretConfig { // defaultMachineConfig returns the default machine configuration. func defaultMachineConfig() MachineConfig { + cpus := runtime.NumCPU() / 2 + if cpus == 0 { + cpus = 1 + } return MachineConfig{ - CPUs: 1, + CPUs: uint64(cpus), DiskSize: 100, Image: getDefaultMachineImage(), Memory: 2048, User: getDefaultMachineUser(), - Volumes: getDefaultMachineVolumes(), + Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), } } @@ -268,7 +299,7 @@ func defaultFarmConfig() FarmConfig { } } -// defaultEngineConfig eturns a default engine configuration. Note that the +// defaultEngineConfig returns a default engine configuration. Note that the // config is different for root and rootless. It also parses the storage.conf. 
func defaultEngineConfig() (*EngineConfig, error) { c := new(EngineConfig) @@ -281,7 +312,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) c.CompatAPIEnforceDockerHub = true - c.ComposeProviders = getDefaultComposeProviders() // may vary across supported platforms + c.ComposeProviders.Set(getDefaultComposeProviders()) // may vary across supported platforms c.ComposeWarningLogs = true if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { @@ -301,20 +332,18 @@ func defaultEngineConfig() (*EngineConfig, error) { c.graphRoot = storeOpts.GraphRoot c.ImageCopyTmpDir = getDefaultTmpDir() - c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") - c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.VolumePluginTimeout = DefaultVolumePluginTimeout c.CompressionFormat = "gzip" - c.HelperBinariesDir = defaultHelperBinariesDir + c.HelperBinariesDir.Set(defaultHelperBinariesDir) if additionalHelperBinariesDir != "" { - c.HelperBinariesDir = append(c.HelperBinariesDir, additionalHelperBinariesDir) + // Prioritize additionalHelperBinariesDir over defaults. + c.HelperBinariesDir.Set(append([]string{additionalHelperBinariesDir}, c.HelperBinariesDir.Get()...)) } - c.HooksDir = DefaultHooksDirs + c.HooksDir.Set(DefaultHooksDirs) c.ImageDefaultTransport = _defaultTransport c.ImageVolumeMode = _defaultImageVolumeMode - c.StateType = BoltDBStateStore c.ImageBuildFormat = "oci" @@ -397,10 +426,8 @@ func defaultEngineConfig() (*EngineConfig, error) { // Needs to be called after populating c.OCIRuntimes. c.OCIRuntime = c.findRuntime() - c.ConmonEnvVars = []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - } - c.ConmonPath = []string{ + c.ConmonEnvVars.Set([]string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}) + c.ConmonPath.Set([]string{ "/usr/libexec/podman/conmon", "/usr/local/libexec/podman/conmon", "/usr/local/lib/podman/conmon", @@ -409,8 +436,8 @@ func defaultEngineConfig() (*EngineConfig, error) { "/usr/local/bin/conmon", "/usr/local/sbin/conmon", "/run/current-system/sw/bin/conmon", - } - c.ConmonRsPath = []string{ + }) + c.ConmonRsPath.Set([]string{ "/usr/libexec/podman/conmonrs", "/usr/local/libexec/podman/conmonrs", "/usr/local/lib/podman/conmonrs", @@ -419,10 +446,9 @@ func defaultEngineConfig() (*EngineConfig, error) { "/usr/local/bin/conmonrs", "/usr/local/sbin/conmonrs", "/run/current-system/sw/bin/conmonrs", - } + }) c.PullPolicy = DefaultPullPolicy - c.DBBackend = stringBoltDB - c.RuntimeSupportsJSON = []string{ + c.RuntimeSupportsJSON.Set([]string{ "crun", "runc", "kata", @@ -430,10 +456,9 @@ func defaultEngineConfig() (*EngineConfig, error) { "youki", "krun", "ocijail", - } - c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} - c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"} - c.InitPath = DefaultInitPath + }) + c.RuntimeSupportsNoCgroups.Set([]string{"crun", "krun"}) + c.RuntimeSupportsKVM.Set([]string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"}) c.NoPivotRoot = false c.InfraImage = DefaultInfraImage @@ -503,47 +528,42 @@ func (c *Config) SecurityOptions() []string { // Sysctls returns the default sysctls to set in containers. func (c *Config) Sysctls() []string { - return c.Containers.DefaultSysctls + return c.Containers.DefaultSysctls.Get() } // Volumes returns the default set of volumes that should be mounted in containers.
func (c *Config) Volumes() []string { - return c.Containers.Volumes + return c.Containers.Volumes.Get() } // Mounts returns the default set of mounts that should be mounted in containers. func (c *Config) Mounts() []string { - return c.Containers.Mounts + return c.Containers.Mounts.Get() } // Devices returns the default additional devices for containers. func (c *Config) Devices() []string { - return c.Containers.Devices + return c.Containers.Devices.Get() } // DNSServers returns the default DNS servers to add to resolv.conf in containers. func (c *Config) DNSServers() []string { - return c.Containers.DNSServers + return c.Containers.DNSServers.Get() } // DNSSearches returns the default DNS searches to add to resolv.conf in containers. func (c *Config) DNSSearches() []string { - return c.Containers.DNSSearches + return c.Containers.DNSSearches.Get() } // DNSOptions returns the default DNS options to add to resolv.conf in containers. func (c *Config) DNSOptions() []string { - return c.Containers.DNSOptions + return c.Containers.DNSOptions.Get() } // Env returns the default additional environment variables to add to containers. func (c *Config) Env() []string { - return c.Containers.Env -} - -// InitPath returns location where init program added to containers when users specify the --init flag. -func (c *Config) InitPath() string { - return c.Containers.InitPath + return c.Containers.Env.Get() } // IPCNS returns the default IPC Namespace configuration to run containers with. @@ -578,7 +598,7 @@ func (c *Config) ShmSize() string { // Ulimits returns the default ulimits to use in containers. func (c *Config) Ulimits() []string { - return c.Containers.DefaultUlimits + return c.Containers.DefaultUlimits.Get() } // PidsLimit returns the default maximum number of pids to use in containers. @@ -623,7 +643,7 @@ func (c *Config) MachineEnabled() bool { // MachineVolumes returns volumes to mount into the VM.
func (c *Config) MachineVolumes() ([]string, error) { - return machineVolumes(c.Machine.Volumes) + return machineVolumes(c.Machine.Volumes.Get()) } func machineVolumes(volumes []string) ([]string, error) { diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go index 729061db..3d442a53 100644 --- a/vendor/github.com/containers/common/pkg/filters/filters.go +++ b/vendor/github.com/containers/common/pkg/filters/filters.go @@ -5,9 +5,11 @@ import ( "fmt" "net/http" "path/filepath" + "regexp" "strings" "time" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/timetype" ) @@ -105,13 +107,7 @@ func PrepareFilters(r *http.Request) (map[string][]string, error) { func MatchLabelFilters(filterValues []string, labels map[string]string) bool { outer: for _, filterValue := range filterValues { - filterArray := strings.SplitN(filterValue, "=", 2) - filterKey := filterArray[0] - if len(filterArray) > 1 { - filterValue = filterArray[1] - } else { - filterValue = "" - } + filterKey, filterValue := splitFilterValue(filterValue) for labelKey, labelValue := range labels { if filterValue == "" || labelValue == filterValue { if labelKey == filterKey || matchPattern(filterKey, labelKey) { @@ -124,6 +120,32 @@ outer: return true } +// MatchNegatedLabelFilters returns true only if none of the negated label filters match the given labels +func MatchNegatedLabelFilters(filterValues []string, labels map[string]string) bool { + for _, filterValue := range filterValues { + filterKey, filterValue := splitFilterValue(filterValue) + for labelKey, labelValue := range labels { + if filterValue == "" || labelValue == filterValue { + if labelKey == filterKey || matchPattern(filterKey, labelKey) { + return false + } + } + } + } + return true +} + +func splitFilterValue(filterValue string) (string, string) { + filterArray := strings.SplitN(filterValue, "=", 2) + filterKey := filterArray[0] + if len(filterArray) > 1 { + filterValue = filterArray[1] + } else { + filterValue = "" + } + return filterKey, filterValue +} + func matchPattern(pattern string, value string) bool { if strings.Contains(pattern, "*") { filter := fmt.Sprintf("*%s*", pattern) @@ -134,3 +156,22 @@ func matchPattern(pattern string, value string) bool { } return false } + +// FilterID is a function used to compare an id against a set of ids; if the +// input is hex, we check whether the prefix matches. Otherwise we assume it is a +// regex and try to match that. +// see https://github.com/containers/podman/issues/18471 for why we do this +func FilterID(id string, filters []string) bool { + for _, want := range filters { + isRegex := types.NotHexRegex.MatchString(want) + if isRegex { + match, err := regexp.MatchString(want, id) + if err == nil && match { + return true + } + } else if strings.HasPrefix(id, strings.ToLower(want)) { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/password/password_supported.go b/vendor/github.com/containers/common/pkg/password/password_supported.go new file mode 100644 index 00000000..56e95b3d --- /dev/null +++ b/vendor/github.com/containers/common/pkg/password/password_supported.go @@ -0,0 +1,57 @@ +//go:build linux || darwin || freebsd +// +build linux darwin freebsd + +package password + +import ( + "errors" + "os" + "os/signal" + "syscall" + + terminal "golang.org/x/term" +) + +var ErrInterrupt = errors.New("interrupted") + +// Read reads a password from the terminal without echo.
+func Read(fd int) ([]byte, error) { + // Store and restore the terminal status on interruptions to + // avoid leaving the terminal in the no-echo password state. + // This is necessary, see https://github.com/golang/go/issues/31180 + + oldState, err := terminal.GetState(fd) + if err != nil { + return make([]byte, 0), err + } + + type Buffer struct { + Buffer []byte + Error error + } + errorChannel := make(chan Buffer, 1) + + // SIGINT and SIGTERM restore the terminal, otherwise the no-echo mode would remain intact + interruptChannel := make(chan os.Signal, 1) + signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM) + defer func() { + signal.Stop(interruptChannel) + close(interruptChannel) + }() + go func() { + for range interruptChannel { + if oldState != nil { + _ = terminal.Restore(fd, oldState) + } + errorChannel <- Buffer{Buffer: make([]byte, 0), Error: ErrInterrupt} + } + }() + + go func() { + buf, err := terminal.ReadPassword(fd) + errorChannel <- Buffer{Buffer: buf, Error: err} + }() + + buf := <-errorChannel + return buf.Buffer, buf.Error +} diff --git a/vendor/github.com/containers/common/pkg/password/password_windows.go b/vendor/github.com/containers/common/pkg/password/password_windows.go new file mode 100644 index 00000000..7a0822d0 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/password/password_windows.go @@ -0,0 +1,21 @@ +//go:build windows +// +build windows + +package password + +import ( + terminal "golang.org/x/term" +) + +// Read reads a password from the terminal. +func Read(fd int) ([]byte, error) { + oldState, err := terminal.GetState(fd) + if err != nil { + return make([]byte, 0), err + } + buf, err := terminal.ReadPassword(fd) + if oldState != nil { + _ = terminal.Restore(fd, oldState) + } + return buf, err +} diff --git a/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go b/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go new file mode 100644 index 00000000..7ea018a2 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go @@ -0,0 +1,27 @@ +//go:build linux +// +build linux + +// Rootlessport Config type for use in podman/cmd/rootlessport. +package rootlessport + +import ( + "github.com/containers/common/libnetwork/types" +) + +const ( + // BinaryName is the binary name for the parent process. + BinaryName = "rootlessport" +) + +// Config needs to be provided to the process via stdin as a JSON string. +// stdin needs to be closed after the message has been written.
+type Config struct { + Mappings []types.PortMapping + NetNSPath string + ExitFD int + ReadyFD int + TmpDir string + ChildIP string + ContainerID string + RootlessCNI bool +} diff --git a/vendor/github.com/containers/common/pkg/servicereaper/service.go b/vendor/github.com/containers/common/pkg/servicereaper/service.go new file mode 100644 index 00000000..11482c59 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/servicereaper/service.go @@ -0,0 +1,65 @@ +//go:build linux || freebsd +// +build linux freebsd + +package servicereaper + +import ( + "os" + "os/signal" + "sync" + "syscall" + + "github.com/sirupsen/logrus" +) + +type service struct { + pidMap map[int]bool + mutex *sync.Mutex +} + +var s = service{ + pidMap: map[int]bool{}, + mutex: &sync.Mutex{}, +} + +func AddPID(pid int) { + s.mutex.Lock() + s.pidMap[pid] = true + s.mutex.Unlock() +} + +func Start() { + // create signal channel and only wait for SIGCHLD + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, syscall.SIGCHLD) + // wait and reap in an extra goroutine + go reaper(sigc) +} + +func reaper(sigc chan os.Signal) { + for { + // block until we receive SIGCHLD + <-sigc + s.mutex.Lock() + for pid := range s.pidMap { + var status syscall.WaitStatus + waitpid, err := syscall.Wait4(pid, &status, syscall.WNOHANG, nil) + if err != nil { + // do not log error for ECHILD + if err != syscall.ECHILD { + logrus.Warnf("Wait for pid %d failed: %v ", pid, err) + } + delete(s.pidMap, pid) + continue + } + // if pid == 0 nothing happened + if waitpid == 0 { + continue + } + if status.Exited() || status.Signaled() { + delete(s.pidMap, pid) + } + } + s.mutex.Unlock() + } +} diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index a2924737..6ba2154a 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -362,6 +362,35 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, } *mounts = append(*mounts, m) } + + // Make sure we set the config to FIPS so that the container does not overwrite + // /etc/crypto-policies/back-ends when crypto-policies-scripts is reinstalled. 
+ cryptoPoliciesConfigFile := filepath.Join(containerRunDir, "fips-config") + file, err := os.Create(cryptoPoliciesConfigFile) + if err != nil { + return fmt.Errorf("creating fips config file in container for FIPS mode: %w", err) + } + defer file.Close() + if _, err := file.WriteString("FIPS\n"); err != nil { + return fmt.Errorf("writing fips config file in container for FIPS mode: %w", err) + } + if err = label.Relabel(cryptoPoliciesConfigFile, mountLabel, false); err != nil { + return fmt.Errorf("applying correct labels on fips-config file: %w", err) + } + if err := file.Chown(uid, gid); err != nil { + return fmt.Errorf("chown fips-config file: %w", err) + } + + policyConfig := "/etc/crypto-policies/config" + if !mountExists(*mounts, policyConfig) { + m := rspec.Mount{ + Source: cryptoPoliciesConfigFile, + Destination: policyConfig, + Type: "bind", + Options: []string{"bind", "rprivate"}, + } + *mounts = append(*mounts, m) + } return nil } diff --git a/vendor/github.com/containers/common/pkg/util/copy.go b/vendor/github.com/containers/common/pkg/util/copy.go deleted file mode 100644 index a45b82fc..00000000 --- a/vendor/github.com/containers/common/pkg/util/copy.go +++ /dev/null @@ -1,57 +0,0 @@ -package util - -import ( - "errors" - "io" -) - -// ErrDetach indicates that an attach session was manually detached by -// the user. -var ErrDetach = errors.New("detached from container") - -// CopyDetachable is similar to io.Copy but support a detach key sequence to break out. -func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) { - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) - if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - return 0, ErrDetach - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - if er != io.EOF { - err = er - } - break - } - } - return written, err -} diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 221b0119..708472ba 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -1,119 +1,16 @@ package util import ( - "bytes" - "errors" "fmt" "os" - "os/exec" "path/filepath" "regexp" - "strings" "time" - "github.com/containers/common/libnetwork/types" "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" ) -const ( - UnknownPackage = "Unknown" -) - -var ErrInterrupt = errors.New("interrupted") - -// Note: This function is copied from containers/podman libpod/util.go -// Please see https://github.com/containers/common/pull/1460 -func queryPackageVersion(cmdArg ...string) string { - output := UnknownPackage - if 1 < len(cmdArg) { - cmd := exec.Command(cmdArg[0], cmdArg[1:]...) 
- if outp, err := cmd.Output(); err == nil { - output = string(outp) - deb := false - if cmdArg[0] == "/usr/bin/dlocate" { - // can return multiple matches - l := strings.Split(output, "\n") - output = l[0] - deb = true - } else if cmdArg[0] == "/usr/bin/dpkg" { - deb = true - } - if deb { - r := strings.Split(output, ": ") - queryFormat := `${Package}_${Version}_${Architecture}` - cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) - if outp, err := cmd.Output(); err == nil { - output = string(outp) - } - } - } - if cmdArg[0] == "/sbin/apk" { - prefix := cmdArg[len(cmdArg)-1] + " is owned by " - output = strings.Replace(output, prefix, "", 1) - } - } - return strings.Trim(output, "\n") -} - -// Note: This function is copied from containers/podman libpod/util.go -// Please see https://github.com/containers/common/pull/1460 -func PackageVersion(program string) string { // program is full path - _, err := os.Stat(program) - if err != nil { - return UnknownPackage - } - packagers := [][]string{ - {"/usr/bin/rpm", "-q", "-f"}, - {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) - {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) - {"/usr/bin/pacman", "-Qo"}, // Arch - {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) - {"/usr/bin/equery", "b"}, // Gentoo (slow) - {"/sbin/apk", "info", "-W"}, // Alpine - {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD - } - - for _, cmd := range packagers { - cmd = append(cmd, program) - if out := queryPackageVersion(cmd...); out != UnknownPackage { - return out - } - } - return UnknownPackage -} - -// Note: This function is copied from containers/podman libpod/util.go -// Please see https://github.com/containers/common/pull/1460 -func ProgramVersion(program string) (string, error) { - return programVersion(program, false) -} - -func ProgramVersionDnsname(program string) (string, error) { - return programVersion(program, true) -} - -func programVersion(program string, dnsname bool) (string, error) { - cmd := exec.Command(program, "--version") - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - if err != nil { - return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err) - } - - output := strings.TrimSuffix(stdout.String(), "\n") - // dnsname --version returns the information to stderr - if dnsname { - output = strings.TrimSuffix(stderr.String(), "\n") - } - - return output, nil -} - // StringInSlice determines if a string is in a string slice, returns bool func StringInSlice(s string, sl []string) bool { for _, i := range sl { @@ -135,25 +32,6 @@ func StringMatchRegexSlice(s string, re []string) bool { return false } -// FilterID is a function used to compare an id against a set of ids, if the -// input is hex we check if the prefix matches. Otherwise we assume it is a -// regex and try to match that. 
-// see https://github.com/containers/podman/issues/18471 for why we do this -func FilterID(id string, filters []string) bool { - for _, want := range filters { - isRegex := types.NotHexRegex.MatchString(want) - if isRegex { - match, err := regexp.MatchString(want, id) - if err == nil && match { - return true - } - } else if strings.HasPrefix(id, strings.ToLower(want)) { - return true - } - } - return false -} - // WaitForFile waits until a file has been created or the given timeout has occurred func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, error) { var inotifyEvents chan fsnotify.Event diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go index aa659c17..0cd53af5 100644 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "os" - "os/signal" "path/filepath" "sync" "syscall" @@ -15,7 +14,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" - terminal "golang.org/x/term" ) var ( @@ -91,45 +89,3 @@ func GetRuntimeDir() (string, error) { } return rootlessRuntimeDir, nil } - -// ReadPassword reads a password from the terminal without echo. -func ReadPassword(fd int) ([]byte, error) { - // Store and restore the terminal status on interruptions to - // avoid that the terminal remains in the password state - // This is necessary as for https://github.com/golang/go/issues/31180 - - oldState, err := terminal.GetState(fd) - if err != nil { - return make([]byte, 0), err - } - - type Buffer struct { - Buffer []byte - Error error - } - errorChannel := make(chan Buffer, 1) - - // SIGINT and SIGTERM restore the terminal, otherwise the no-echo mode would remain intact - interruptChannel := make(chan os.Signal, 1) - signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM) - defer func() { - signal.Stop(interruptChannel) - close(interruptChannel) - }() - go func() { - for range interruptChannel { - if oldState != nil { - _ = terminal.Restore(fd, oldState) - } - errorChannel <- Buffer{Buffer: make([]byte, 0), Error: ErrInterrupt} - } - }() - - go func() { - buf, err := terminal.ReadPassword(fd) - errorChannel <- Buffer{Buffer: buf, Error: err} - }() - - buf := <-errorChannel - return buf.Buffer, buf.Error -} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go index cc431e8a..1525bdc3 100644 --- a/vendor/github.com/containers/common/pkg/util/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -5,24 +5,9 @@ package util import ( "errors" - - terminal "golang.org/x/term" ) // getRuntimeDir returns the runtime directory func GetRuntimeDir() (string, error) { return "", errors.New("this function is not implemented for windows") } - -// ReadPassword reads a password from the terminal. 
-func ReadPassword(fd int) ([]byte, error) { - oldState, err := terminal.GetState(fd) - if err != nil { - return make([]byte, 0), err - } - buf, err := terminal.ReadPassword(fd) - if oldState != nil { - _ = terminal.Restore(fd, oldState) - } - return buf, err -} diff --git a/vendor/github.com/containers/common/pkg/version/version.go b/vendor/github.com/containers/common/pkg/version/version.go new file mode 100644 index 00000000..0d830060 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/version/version.go @@ -0,0 +1,105 @@ +package version + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" +) + +const ( + UnknownPackage = "Unknown" +) + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func queryPackageVersion(cmdArg ...string) string { + output := UnknownPackage + if 1 < len(cmdArg) { + cmd := exec.Command(cmdArg[0], cmdArg[1:]...) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + deb := false + if cmdArg[0] == "/usr/bin/dlocate" { + // can return multiple matches + l := strings.Split(output, "\n") + output = l[0] + deb = true + } else if cmdArg[0] == "/usr/bin/dpkg" { + deb = true + } + if deb { + r := strings.Split(output, ": ") + queryFormat := `${Package}_${Version}_${Architecture}` + cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + } + } + if cmdArg[0] == "/sbin/apk" { + prefix := cmdArg[len(cmdArg)-1] + " is owned by " + output = strings.Replace(output, prefix, "", 1) + } + } + return strings.Trim(output, "\n") +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func Package(program string) string { // program is full path + _, err := os.Stat(program) + if err != nil { + return UnknownPackage + } + packagers := [][]string{ + {"/usr/bin/rpm", "-q", "-f"}, + {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) + {"/usr/bin/pacman", "-Qo"}, // Arch + {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) + {"/usr/bin/equery", "b"}, // Gentoo (slow) + {"/sbin/apk", "info", "-W"}, // Alpine + {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD + } + + for _, cmd := range packagers { + cmd = append(cmd, program) + if out := queryPackageVersion(cmd...); out != UnknownPackage { + return out + } + } + return UnknownPackage +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func Program(name string) (string, error) { + return program(name, false) +} + +func ProgramDnsname(name string) (string, error) { + return program(name, true) +} + +func program(program string, dnsname bool) (string, error) { + cmd := exec.Command(program, "--version") + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err) + } + + output := strings.TrimSuffix(stdout.String(), "\n") + // dnsname --version returns the information to stderr + if dnsname { + output = strings.TrimSuffix(stderr.String(), "\n") + } + + return output, nil +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index a0a57b54..19ba92c0 100644 --- 
a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.56.0" +const Version = "0.57.4" diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 6ba70f0b..a42e3b67 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -284,11 +284,24 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf } } if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) + if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { + // HACK: Don’t record zstd:chunked algorithms. + // There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression, + // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless. + // + // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate + // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName + // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName, which causes warnings about + // inconsistent data to be logged. + c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) + } } if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) + if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName { + // HACK: Don’t record zstd:chunked algorithms, see above. + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) + } } return nil } diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index b406b0c3..1305676d 100.644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt } } -// bpdData contains data that the copy pipeline needs about the encryption step. +// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step. type bpEncryptionStepData struct { encrypting bool // We are actually encrypting the stream finalizer ocicrypt.EncryptLayerFinalizer diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 30f6da25..f252e347 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -340,7 +340,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, if err != nil { return nil, err } - sigs = append(sigs, newSigs...) + sigs = append(slices.Clone(sigs), newSigs...)
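The slices.Clone change above (and the matching ones in copy/single.go and docker/distribution_error.go below) guards against append writing into the backing array of a slice the caller still holds. A small self-contained demonstration of the aliasing it prevents (illustrative only; not code from this patch):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	sigs := make([]string, 2, 4) // spare capacity is what makes aliasing possible
	sigs[0], sigs[1] = "sig1", "sig2"

	a := append(sigs, "A")  // may write "A" into sigs' backing array
	b := append(sigs, "B")  // ...and this overwrites that same slot with "B"
	fmt.Println(a[2], b[2]) // "B B": both results share storage

	c := append(slices.Clone(sigs), "C") // cloning first allocates fresh storage
	d := append(slices.Clone(sigs), "D")
	fmt.Println(c[2], d[2]) // "C D"
}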
c.Printf("Storing list signatures\n") if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 37b1bfe9..67ca43f7 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -277,7 +277,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar if err != nil { return copySingleImageResult{}, err } - sigs = append(sigs, newSigs...) + sigs = append(slices.Clone(sigs), newSigs...) if len(sigs) > 0 { c.Printf("Storing signatures\n") @@ -380,8 +380,9 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, compressionAlgos := set.New[string]() for _, srcInfo := range ic.src.LayerInfos() { - compression := compressionAlgorithmFromMIMEType(srcInfo) - compressionAlgos.Add(compression.Name()) + if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil { + compressionAlgos.Add(c.Name()) + } } algos, err := algorithmsByNames(compressionAlgos.Values()) @@ -743,7 +744,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { if srcInfo.Size != -1 { - bar.SetRefill(srcInfo.Size - bar.Current()) + refill := srcInfo.Size - bar.Current() + bar.SetCurrent(srcInfo.Size) + bar.SetRefill(refill) } bar.mark100PercentComplete() hideProgressBar = false diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 2c245f54..354af214 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -9,11 +9,6 @@ import ( "github.com/docker/go-connections/tlsconfig" ) -const ( - // The default API version to be used in case none is explicitly specified - defaultAPIVersion = "1.22" -) - // NewDockerClient initializes a new API client based on the passed SystemContext. 
func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { host := dockerclient.DefaultDockerHost @@ -23,7 +18,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { opts := []dockerclient.Opt{ dockerclient.WithHost(host), - dockerclient.WithVersion(defaultAPIVersion), + dockerclient.WithAPIVersionNegotiation(), } // We conditionalize building the TLS configuration only to TLS sockets: diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index 59e02462..55431db1 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -2,6 +2,7 @@ package daemon import ( "context" + "encoding/json" "errors" "fmt" "io" @@ -85,12 +86,40 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe } }() + err = imageLoad(ctx, c, reader) +} + +// imageLoad accepts a tar stream on reader and sends it to c +func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error { resp, err := c.ImageLoad(ctx, reader, true) if err != nil { - err = fmt.Errorf("saving image to docker engine: %w", err) - return + return fmt.Errorf("starting a load operation in docker engine: %w", err) } defer resp.Body.Close() + + // jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage, + // copied here to minimize dependencies. + type jsonError struct { + Message string `json:"message,omitempty"` + } + type jsonMessage struct { + Error *jsonError `json:"errorDetail,omitempty"` + } + + dec := json.NewDecoder(resp.Body) + for { + var msg jsonMessage + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("parsing docker load progress: %w", err) + } + if msg.Error != nil { + return fmt.Errorf("docker engine reported: %s", msg.Error.Message) + } + } + return nil // No error reported = success } // DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
dockerV1Hostname + // A search term of library/foo does not find the library/foo image on the docker.io servers, + // which is surprising - but Docker modifies the search term client-side in this same way, + // so it seems convenient to do the same thing. + // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334 + image = strings.TrimPrefix(image, "library/") } client, err := newDockerClient(sys, hostname, registry) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 42bbfd95..93160480 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -123,6 +123,9 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef if !ok { return "", errors.New("ref must be a dockerReference") } + if dr.isUnknownDigest { + return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport()) + } tagOrDigest, err := dr.tagOrDigest() if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 0e7b154c..a9a36f0a 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, // the source blob is uncompressed, and the destination blob is being compressed "on the fly". - if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests { + if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests { logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) if err != nil { @@ -341,39 +341,58 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // Then try reusing blobs from other locations.
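The search hunk above trims the "library/" prefix so that official-image queries behave on docker.io; a tiny sketch of that normalization (hypothetical helper name):

package main

import (
	"fmt"
	"strings"
)

// normalizeSearchTerm mirrors the hunk above: docker.io's search endpoint
// matches "busybox", not "library/busybox", for official images.
func normalizeSearchTerm(image string) string {
	return strings.TrimPrefix(image, "library/")
}

func main() {
	fmt.Println(normalizeSearchTerm("library/busybox")) // "busybox"
	fmt.Println(normalizeSearchTerm("myorg/busybox"))   // unchanged
}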
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute) for _, candidate := range candidates { - candidateRepo, err := parseBICLocationReference(candidate.Location) - if err != nil { - logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) - continue - } + var err error compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) if err != nil { logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err) continue } + var candidateRepo reference.Named + if !candidate.UnknownLocation { + candidateRepo, err = parseBICLocationReference(candidate.Location) + if err != nil { + logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) + continue + } + } if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) { requiredCompression := "nil" if compressionAlgorithm != nil { requiredCompression = compressionAlgorithm.Name() } - logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + if !candidate.UnknownLocation { + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + } else { + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression) + } continue } - if candidate.CompressorName != blobinfocache.Uncompressed { - logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + if !candidate.UnknownLocation { + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + } else { + logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name()) + } + // Sanity checks: + if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { + // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo + // (the "from" parameter); in that case we might try to use these candidates as well. + // + // OTOH that would mean we can’t do the “blobExists” check, and if there is no match + // we could get an upload request that we would have to cancel. + logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) + continue + } } else { - logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name()) - } - - // Sanity checks: - if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { - // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo - // (the "from" parameter); in that case we might try to use these candidates as well. - // - // OTOH that would mean we can’t do the “blobExists” check, and if there is no match - // we could get an upload request that we would have to cancel.
- logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) - continue + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName) + } else { + logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String()) + } + // This digest is a known variant of this blob but we don’t + // have a recorded location in this registry, let’s try looking + // for it in the current repo. + candidateRepo = reference.TrimNamed(d.ref.ref) } if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { logrus.Debug("... Already tried the primary destination") @@ -433,7 +452,15 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { var refTail string - if instanceDigest != nil { + // If d.ref.isUnknownDigest=true, then we push without a tag, so get the + // digest that will be used + if d.ref.isUnknownDigest { + digest, err := manifest.Digest(m) + if err != nil { + return err + } + refTail = digest.String() + } else if instanceDigest != nil { // If the instanceDigest is provided, then use it as the refTail, because the reference, // whether it includes a tag or a digest, refers to the list as a whole, and not this // particular instance. @@ -688,6 +715,10 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context. } } + // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it. + ociManifest.Layers = slices.Clone(ociManifest.Layers) + // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to + // the slice in the original object (or in a newly allocated object). for _, sig := range signatures { mimeType := sig.UntrustedMIMEType() payloadBlob := sig.UntrustedPayload() diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 231d5d21..f9d4d603 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -38,8 +38,8 @@ type dockerImageSource struct { impl.DoesNotAffectLayerInfosForCopy stubs.ImplementsGetBlobAt - logicalRef dockerReference // The reference the user requested. - physicalRef dockerReference // The actual reference we are accessing (possibly a mirror) + logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest + physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest c *dockerClient // State cachedManifest []byte // nil if not loaded yet @@ -48,7 +48,12 @@ type dockerImageSource struct { // newImageSource creates a new ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. +// The caller must ensure !ref.isUnknownDigest. 
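For the unknown-digest push path above, PutManifest derives the manifests/ path component from the manifest bytes themselves via manifest.Digest. A rough stand-alone equivalent using go-digest (note the vendored helper additionally canonicalizes schema1 manifests, which this sketch skips):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	manifestBytes := []byte(`{"schemaVersion": 2}`) // stand-in manifest body
	dgst := digest.FromBytes(manifestBytes)         // sha256 by default
	refTail := dgst.String()                        // "sha256:..."
	fmt.Println("PUT .../manifests/" + refTail)
}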
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { + if ref.isUnknownDigest { + return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport()) + } + registryConfig, err := loadRegistryConfiguration(sys) if err != nil { return nil, err @@ -121,7 +126,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef // The caller must call .Close() on the returned ImageSource. func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource, registryConfig *registryConfiguration) (*dockerImageSource, error) { - physicalRef, err := newReference(pullSource.Reference) + physicalRef, err := newReference(pullSource.Reference, false) if err != nil { return nil, err } @@ -591,6 +596,10 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con // deleteImage deletes the named image from the registry, if supported. func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + if ref.isUnknownDigest { + return fmt.Errorf("Docker reference without a tag or digest cannot be deleted") + } + registryConfig, err := loadRegistryConfiguration(sys) if err != nil { return err diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go index 6ae84915..1c89302f 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go @@ -12,6 +12,11 @@ import ( "github.com/containers/image/v5/types" ) +// UnknownDigestSuffix can be appended to a reference when the caller +// wants to push an image without a tag or digest. +// NewReferenceUnknownDigest() is called when this const is detected. +const UnknownDigestSuffix = "@@unknown-digest@@" + func init() { transports.Register(Transport) } @@ -43,7 +48,8 @@ func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { // dockerReference is an ImageReference for Docker images. type dockerReference struct { - ref reference.Named // By construction we know that !reference.IsNameOnly(ref) + ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true + isUnknownDigest bool } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. @@ -51,23 +57,46 @@ func ParseReference(refString string) (types.ImageReference, error) { if !strings.HasPrefix(refString, "//") { return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) } + // Check if ref has UnknownDigestSuffix suffixed to it + unknownDigest := false + if strings.HasSuffix(refString, UnknownDigestSuffix) { + unknownDigest = true + refString = strings.TrimSuffix(refString, UnknownDigestSuffix) + } ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) if err != nil { return nil, err } + + if unknownDigest { + if !reference.IsNameOnly(ref) { + return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix) + } + return NewReferenceUnknownDigest(ref) + } + ref = reference.TagNameOnly(ref) return NewReference(ref) } // NewReference returns a Docker reference for a named reference. 
The reference must satisfy !reference.IsNameOnly(). func NewReference(ref reference.Named) (types.ImageReference, error) { - return newReference(ref) + return newReference(ref, false) +} + +// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting +// a tag on the registry. The reference must satisfy reference.IsNameOnly() +func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) { + return newReference(ref, true) } // newReference returns a dockerReference for a named reference. -func newReference(ref reference.Named) (dockerReference, error) { - if reference.IsNameOnly(ref) { - return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) +func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) { + if reference.IsNameOnly(ref) && !unknownDigest { + return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref)) + } + if !reference.IsNameOnly(ref) && unknownDigest { + return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // The docker/distribution API does not really support that (we can’t ask for an image with a specific @@ -81,7 +110,8 @@ func newReference(ref reference.Named) (dockerReference, error) { } return dockerReference{ - ref: ref, + ref: ref, + isUnknownDigest: unknownDigest, }, nil } @@ -95,7 +125,11 @@ func (ref dockerReference) Transport() types.ImageTransport { // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dockerReference) StringWithinTransport() string { - return "//" + reference.FamiliarString(ref.ref) + famString := "//" + reference.FamiliarString(ref.ref) + if ref.isUnknownDigest { + return famString + UnknownDigestSuffix + } + return famString } // DockerReference returns a Docker reference associated with this reference @@ -113,6 +147,9 @@ func (ref dockerReference) DockerReference() reference.Named { // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. func (ref dockerReference) PolicyConfigurationIdentity() string { + if ref.isUnknownDigest { + return ref.ref.Name() + } res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) @@ -126,7 +163,13 @@ func (ref dockerReference) PolicyConfigurationIdentity() string { // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. 
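Putting the transport changes together, a hedged usage sketch of the new suffix (assuming the vendored containers/image v5 API exactly as shown above):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
)

func main() {
	// A name-only reference plus the suffix parses into an unknown-digest
	// reference, usable only as a push destination.
	ref, err := docker.ParseReference("//quay.io/ns/app" + docker.UnknownDigestSuffix)
	if err != nil {
		panic(err)
	}
	// The suffix survives the round trip.
	fmt.Println(ref.StringWithinTransport()) // "//quay.io/ns/app@@unknown-digest@@"
}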
func (ref dockerReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.ref) + namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref) + if ref.isUnknownDigest { + if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() { + namespaces = namespaces[1:] + } + } + return namespaces } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. @@ -163,6 +206,10 @@ func (ref dockerReference) tagOrDigest() (string, error) { if ref, ok := ref.ref.(reference.NamedTagged); ok { return ref.Tag(), nil } + + if ref.isUnknownDigest { + return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref)) + } // This should not happen, NewReference above refuses reference.IsNameOnly values. return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) } diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index e0396918..4392f9d1 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -88,7 +88,7 @@ func registryHTTPResponseToError(res *http.Response) error { response = response[:50] + "..." } // %.0w makes e visible to error.Unwrap() without including any text - err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e) + err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e) case errcode.Error: // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually // rather redundant. So reword it without using e.Code.Error() if e.Message is the default. diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go index 123f6ce6..f2ebb929 100644 --- a/vendor/github.com/containers/image/v5/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -2,6 +2,8 @@ package image import ( "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/unparsedimage" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -17,3 +19,23 @@ type UnparsedImage = image.UnparsedImage func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { return image.UnparsedInstance(src, instanceDigest) } + +// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef +type unparsedWithRef struct { + private.UnparsedImage + ref types.ImageReference +} + +func (uwr *unparsedWithRef) Reference() types.ImageReference { + return uwr.ref +} + +// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef. +// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image +// based on a remote-registry policy. 
+func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage { + return &unparsedWithRef{ + UnparsedImage: unparsedimage.FromPublic(wrappedInstance), + ref: replacementRef, + } +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go index fdd24581..429d6826 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -32,7 +32,7 @@ type BlobInfoCache2 interface { // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) - // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations + // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). // @@ -46,7 +46,8 @@ type BlobInfoCache2 interface { // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression - Location types.BICLocationReference + Digest digest.Digest + CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index 6629967b..df0e8e41 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -196,14 +196,12 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti return m.convertToManifestSchema2(ctx, options) } -// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions. +// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format. // If not, it returns (nil, nil). // If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, // and edits *options to not try decryption again. -func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { - if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool { - return info.CryptoOperation == types.Decrypt - }) { +func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { + if options == nil || options.LayerInfos == nil { return nil, nil } @@ -212,19 +210,35 @@ func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.Manife return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) } - res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate. 
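Returning to the UnparsedInstanceWithReference helper added above: one intended use, per its doc comment, is evaluating signature policy for locally-available image data as if it came from its remote origin. A sketch under that assumption (setup of the image source and policy context elided; names are hypothetical):

package verify

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// policyAllowsAs checks local image data (src) against the signature-policy
// scope of remoteRef; a sketch only, callers own src and policyCtx.
func policyAllowsAs(ctx context.Context, policyCtx *signature.PolicyContext,
	src types.ImageSource, remoteRef types.ImageReference) (bool, error) {
	unparsed := image.UnparsedInstanceWithReference(
		image.UnparsedInstance(src, nil), // the local image bytes
		remoteRef,                        // the identity used for policy lookup
	)
	return policyCtx.IsRunningImageAllowed(ctx, unparsed)
}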
- updatedEdits := slices.Clone(options.LayerInfos) - for i, info := range options.LayerInfos { - if info.CryptoOperation == types.Decrypt { - res[i].CryptoOperation = types.Decrypt - updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate. + laterEdits := slices.Clone(options.LayerInfos) + needsOCIOnlyEdits := false + for i, edit := range options.LayerInfos { + // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. + ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal + ociOnlyEdits[i].CompressionAlgorithm = nil + + if edit.CryptoOperation == types.Decrypt { + needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas + ociOnlyEdits[i].CryptoOperation = types.Decrypt + laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + } + + if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd || + originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas. + ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation + ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm + laterEdits[i].CompressionOperation = types.PreserveOriginal + laterEdits[i].CompressionAlgorithm = nil } - // Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. - res[i].CompressionOperation = types.PreserveOriginal - res[i].CompressionAlgorithm = nil } - options.LayerInfos = updatedEdits - return res, nil + if !needsOCIOnlyEdits { + return nil, nil + } + + options.LayerInfos = laterEdits + return ociOnlyEdits, nil } // convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. @@ -238,15 +252,15 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits // which remove OCI-specific features, because trying to convert those layers would fail. - // So, do the layer updates for decryption. + // So, do the layer updates for decryption, and for conversions from Zstd. 
ociManifest := m.m - layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options) + ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options) if err != nil { return nil, err } - if layerDecryptEdits != nil { + if ociOnlyEdits != nil { ociManifest = manifest.OCI1Clone(ociManifest) - if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil { + if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil { return nil, err } } @@ -275,9 +289,8 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType case imgspecv1.MediaTypeImageLayerZstd: return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - // FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, - ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc: + ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc: return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go index d5de81a6..5d28b3e7 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go @@ -12,6 +12,11 @@ func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candi if options.RequiredCompression == nil { return true // no requirement imposed } + if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName { + // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. + // The caller must re-compress to build those annotations. + return false + } return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name()) } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 357e2f3d..7ce5bb06 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -133,7 +133,9 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { } } if len(addedEntries) != 0 { - index.Manifests = append(index.Manifests, addedEntries...) + // slices.Clone() here to ensure a private backing array; + // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. + index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) 
} return nil } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index dcd2646d..d8d06513 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -167,7 +167,9 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { } } if len(addedEntries) != 0 { - index.Manifests = append(index.Manifests, addedEntries...) + // slices.Clone() here to ensure the slice uses a private backing array; + // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity. + index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) } if len(addedEntries) != 0 || updatedAnnotations { slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { @@ -220,7 +222,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip case ic.manifestPosition != other.manifestPosition: return ic.manifestPosition < other.manifestPosition } - panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. + panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. } // chooseInstance is a private equivalent to ChooseInstanceByCompression, diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go index 6c9ee334..ee840989 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -28,6 +28,18 @@ func (e ImageNotFoundError) Error() string { return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) } +// ArchiveFileNotFoundError occurs when the archive file does not exist. 
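The new ArchiveFileNotFoundError (defined immediately below) lets callers distinguish a missing archive file from other failures; a hedged sketch with errors.As:

package main

import (
	"errors"
	"fmt"

	ociarchive "github.com/containers/image/v5/oci/archive"
)

func describe(err error) string {
	var notFound ociarchive.ArchiveFileNotFoundError
	if errors.As(err, &notFound) {
		// The archive path simply does not exist; e.g. suggest a pull.
		return fmt.Sprintf("missing archive: %v", notFound)
	}
	return fmt.Sprintf("other failure: %v", err)
}

func main() {
	fmt.Println(describe(errors.New("boom"))) // "other failure: boom"
}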
+type ArchiveFileNotFoundError struct { + // ref is the image reference + ref ociArchiveReference + // path is the file path that was not present + path string +} + +func (e ArchiveFileNotFoundError) Error() string { + return fmt.Sprintf("archive file not found: %q", e.path) +} + type ociArchiveImageSource struct { impl.Compat diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 2a03feee..d5fee363 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "strings" @@ -171,18 +172,24 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) // creates the temporary directory and copies the tarred content to it func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) { + src := ref.resolvedFile + arch, err := os.Open(src) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src} + } else { + return tempDirOCIRef{}, err + } + } + defer arch.Close() + tempDirRef, err := createOCIRef(sys, ref.image) if err != nil { return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err) } - src := ref.resolvedFile dst := tempDirRef.tempDirectory + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. - arch, err := os.Open(src) - if err != nil { - return tempDirOCIRef{}, err - } - defer arch.Close() if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil { if err := tempDirRef.deleteTempDir(); err != nil { return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go new file mode 100644 index 00000000..8dd54f25 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -0,0 +1,240 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// DeleteImage deletes the named image from the directory, if supported. 
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + sharedBlobsDir := "" + if sys != nil && sys.OCISharedBlobDirPath != "" { + sharedBlobsDir = sys.OCISharedBlobDirPath + } + + descriptor, descriptorIndex, err := ref.getManifestDescriptor() + if err != nil { + return err + } + + var blobsUsedByImage map[digest.Digest]int + + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir) + case imgspecv1.MediaTypeImageIndex: + blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir) + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + if err != nil { + return err + } + + blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir) + if err != nil { + return err + } + + err = ref.deleteBlobs(blobsToDelete) + if err != nil { + return err + } + + return ref.deleteReferenceFromIndex(descriptorIndex) +} + +func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { + manifest, err := ref.getManifest(descriptor, sharedBlobsDir) + if err != nil { + return nil, err + } + blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest) + blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference + + return blobsUsedInManifest, nil +} + +func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return nil, err + } + index, err := parseIndex(blobPath) + if err != nil { + return nil, err + } + + blobsUsedInImageRefIndex := make(map[digest.Digest]int) + err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir) + if err != nil { + return nil, err + } + blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference + + return blobsUsedInImageRefIndex, nil +} + +// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map +func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { + for _, descriptor := range index.Manifests { + destination[descriptor.Digest]++ + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + manifest, err := ref.getManifest(&descriptor, sharedBlobsDir) + if err != nil { + return err + } + for digest, count := range ref.getBlobsUsedInManifest(manifest) { + destination[digest] += count + } + case imgspecv1.MediaTypeImageIndex: + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return err + } + index, err := parseIndex(blobPath) + if err != nil { + return err + } + err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + } + + return nil +} + +func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int { + blobsUsedInManifest := make(map[digest.Digest]int, 0) + + blobsUsedInManifest[manifest.Config.Digest]++ + for _, layer := range manifest.Layers { + blobsUsedInManifest[layer.Digest]++ + } + + return blobsUsedInManifest +} + +// This takes in a map of 
the digest and their usage count in the manifest to be deleted +// It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted +func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) { + rootIndex, err := ref.getIndex() + if err != nil { + return nil, err + } + blobsUsedInRootIndex := make(map[digest.Digest]int) + err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) + if err != nil { + return nil, err + } + + blobsToDelete := set.New[digest.Digest]() + + for digest, count := range blobsUsedInRootIndex { + if count-blobsUsedByDescriptorToDelete[digest] == 0 { + blobsToDelete.Add(digest) + } + } + + return blobsToDelete, nil +} + +// This transport never generates layouts where blobs for an image are both in the local blobs directory +// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set. +// +// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what +// the other layouts sharing that directory are, and we might not even have permission to read them), +// so we can’t really delete any blobs in that case. +// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt, +// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently +// check for local blobs (but we should make no noise if the blobs are actually in the shared directory). +// +// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set +func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { + for _, digest := range blobsToDelete.Values() { + blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above + if err != nil { + return err + } + err = deleteBlob(blobPath) + if err != nil { + return err + } + } + + return nil +} + +func deleteBlob(blobPath string) error { + logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath)) + + err := os.Remove(blobPath) + if err != nil && !os.IsNotExist(err) { + return err + } else { + return nil + } +} + +func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { + index, err := ref.getIndex() + if err != nil { + return err + } + + index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1) + + return saveJSON(ref.indexPath(), index) +} + +func saveJSON(path string, content any) error { + // If the file already exists, get its mode to preserve it + var mode fs.FileMode + existingfi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } else { // File does not exist, use default mode + mode = 0644 + } + } else { + mode = existingfi.Mode() + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + defer file.Close() + + return json.NewEncoder(file).Encode(content) +} + +func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) { + manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return nil, err + } + + manifest, err := parseJSON[imgspecv1.Manifest](manifestPath) + if err != nil { + return nil, err + } + + return manifest, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go 
b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 8ff43d44..100d1676 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -19,6 +19,7 @@ import ( digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) type ociImageDestination struct { @@ -84,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, // but it MAY be empty (e.g. if we never end up calling PutBlob) // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 - if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil { return nil, err } return d, nil @@ -271,8 +272,8 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { return } } - // It's a new entry to be added to the index. - d.index.Manifests = append(d.index.Manifests, *desc) + // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created. + d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) } // Commit marks the process of storing the image as successful and asks for the image to be persisted. @@ -283,7 +284,13 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { - if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ + Version: imgspecv1.ImageLayoutVersion, + }) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 6b423f3b..f5f1debc 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -60,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSo client := &http.Client{} client.Transport = tr - descriptor, err := ref.getManifestDescriptor() + descriptor, _, err := ref.getManifestDescriptor() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 6586b844..1e26dc52 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -160,48 +160,56 @@ func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) // getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together // with an error. 
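The DeleteImage machinery above decides what is safe to remove by comparing per-digest usage counts for the image being deleted against counts for the whole index. A toy illustration of that reference-counting idea (hypothetical data, not the vendored types):

package main

import "fmt"

func main() {
	usedByImage := map[string]int{"sha256:aaa": 1, "sha256:bbb": 2}
	usedByIndex := map[string]int{"sha256:aaa": 1, "sha256:bbb": 3, "sha256:ccc": 1}

	var deletable []string
	for d, total := range usedByIndex {
		if total-usedByImage[d] == 0 { // no other manifest still references it
			deletable = append(deletable, d)
		}
	}
	fmt.Println(deletable) // only sha256:aaa is safe to delete
}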
func (ref ociReference) getIndex() (*imgspecv1.Index, error) { - indexJSON, err := os.Open(ref.indexPath()) + return parseIndex(ref.indexPath()) +} + +func parseIndex(path string) (*imgspecv1.Index, error) { + return parseJSON[imgspecv1.Index](path) +} + +func parseJSON[T any](path string) (*T, error) { + content, err := os.Open(path) if err != nil { return nil, err } - defer indexJSON.Close() + defer content.Close() - index := &imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + obj := new(T) + if err := json.NewDecoder(content).Decode(obj); err != nil { return nil, err } - return index, nil + return obj, nil } -func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) { index, err := ref.getIndex() if err != nil { - return imgspecv1.Descriptor{}, err + return imgspecv1.Descriptor{}, -1, err } if ref.image == "" { // return manifest if only one image is in the oci directory if len(index.Manifests) != 1 { // ask user to choose image when more than one image in the oci directory - return imgspecv1.Descriptor{}, ErrMoreThanOneImage + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage } - return index.Manifests[0], nil + return index.Manifests[0], 0, nil } else { // if image specified, look through all manifests for a match var unsupportedMIMETypes []string - for _, md := range index.Manifests { + for i, md := range index.Manifests { if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { - return md, nil + return md, i, nil } unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) } } if len(unsupportedMIMETypes) != 0 { - return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) } } - return imgspecv1.Descriptor{}, ImageNotFoundError{ref} + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} } // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name @@ -211,7 +219,8 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, if !ok { return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") } - return ociRef.getManifestDescriptor() + md, _, err := ociRef.getManifestDescriptor() + return md, err } // NewImageSource returns a types.ImageSource for this reference. @@ -226,19 +235,14 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst return newImageDestination(sys, ref) } -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.New("Deleting images not implemented for oci: images") -} - // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. func (ref ociReference) ociLayoutPath() string { - return filepath.Join(ref.dir, "oci-layout") + return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile) } // indexPath returns a path for the index.json within a directory using OCI conventions. 
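The generic parseJSON[T] helper above replaces per-type decoding; a self-contained sketch of the same pattern, applied to the oci-layout file that Commit() writes (the local layoutFile type is illustrative, not the vendored one):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// parseJSON mirrors the generic helper introduced above: decode the file at
// path into a freshly allocated T.
func parseJSON[T any](path string) (*T, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	obj := new(T)
	if err := json.NewDecoder(f).Decode(obj); err != nil {
		return nil, err
	}
	return obj, nil
}

type layoutFile struct {
	Version string `json:"imageLayoutVersion"`
}

func main() {
	l, err := parseJSON[layoutFile]("oci-layout")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(l.Version) // "1.0.0" for current layouts
}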
func (ref ociReference) indexPath() string { - return filepath.Join(ref.dir, "index.json") + return filepath.Join(ref.dir, imgspecv1.ImageIndexFile) } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. @@ -246,9 +250,11 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st if err := digest.Validate(); err != nil { return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err) } - blobDir := filepath.Join(ref.dir, "blobs") + var blobDir string if sharedBlobDir != "" { blobDir = sharedBlobDir + } else { + blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir) } return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index bc9315f6..97562687 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -10,15 +10,20 @@ import ( "github.com/opencontainers/go-digest" ) -// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, // and therefore ultimately by types.BlobInfoCache.CandidateLocations. // This is a heuristic/guess, and could well use a different value. const replacementAttempts = 5 +// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2. +// This is a heuristic/guess, and could well use a different value. +const replacementUnknownLocationAttempts = 2 + // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) + LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, @@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) { css.cs[i], css.cs[j] = css.cs[j], css.cs[i] } -// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the -// number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the +// number of entries to limit for known and unknown location separately, only to make testing simpler. 
+// TODO: this function is no longer destructive; the prioritized result is actually a copy of the original +// candidate set, so in the future we might want to rename this public API and remove the "destructively" prefix. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { + // split unknown candidates and known candidates + // and limit them separately. + var knownLocationCandidates []CandidateWithTime + var unknownLocationCandidates []CandidateWithTime // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available. @@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest: primaryDigest, uncompressedDigest: uncompressedDigest, }) + for _, candidate := range cs { + if candidate.Candidate.UnknownLocation { + unknownLocationCandidates = append(unknownLocationCandidates, candidate) + } else { + knownLocationCandidates = append(knownLocationCandidates, candidate) + } + } - resLength := len(cs) - if resLength > maxCandidates { - resLength = maxCandidates + knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit) + remainingCapacity := totalLimit - knownLocationCandidatesUsed + unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates))) + res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) + for i := 0; i < knownLocationCandidatesUsed; i++ { + res[i] = knownLocationCandidates[i].Candidate } - res := make([]blobinfocache.BICReplacementCandidate2, resLength) - for i := range res { - res[i] = cs[i].Candidate + // If candidates with unknown location are found, let's add them to the final list + for i := 0; i < unknownLocationCandidatesUsed; i++ { + res = append(res, unknownLocationCandidates[i].Candidate) } return res } // DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, -// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), -// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. +// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), and returns an +// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
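The reworked prioritizer above caps known-location and unknown-location candidates separately (replacementAttempts vs replacementUnknownLocationAttempts). A stripped-down sketch of that selection logic (hypothetical candidate type):

package main

import "fmt"

type candidate struct {
	digest          string
	unknownLocation bool
}

// selectCandidates assumes sorted is already in priority order and applies
// the two limits the way the hunk above does.
func selectCandidates(sorted []candidate, totalLimit, noLocationLimit int) []candidate {
	var known, unknown []candidate
	for _, c := range sorted {
		if c.unknownLocation {
			unknown = append(unknown, c)
		} else {
			known = append(known, c)
		}
	}
	res := known[:min(len(known), totalLimit)]
	remaining := totalLimit - len(res)
	res = append(res, unknown[:min(noLocationLimit, min(remaining, len(unknown)))]...)
	return res
}

func min(a, b int) int { // local helper, as in the hunk above (pre-Go 1.21)
	if a < b {
		return a
	}
	return b
}

func main() {
	cs := []candidate{{"d1", false}, {"d2", true}, {"d3", true}, {"d4", true}}
	fmt.Println(selectCandidates(cs, 5, 2)) // d1, plus at most two unknown-location entries
}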
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 { - return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) + return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 9e5c4256..cfad16b2 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -133,24 +133,39 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso mem.compressors[blobDigest] = compressorName } -// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. -func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory +// with corresponding compression info from mem.compressors, and returns the result of appending +// them to candidates. v2Output allows including candidates with unknown location, and filters out +// candidates with unknown compression. +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime { + compressorName := blobinfocache.UnknownCompression + if v, ok := mem.compressors[digest]; ok { + compressorName = v + } + if compressorName == blobinfocache.UnknownCompression && v2Output { + return candidates + } locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present - for l, t := range locations { - compressorName, compressorKnown := mem.compressors[digest] - if !compressorKnown { - if requireCompressionInfo { - continue - } - compressorName = blobinfocache.UnknownCompression + if len(locations) > 0 { + for l, t := range locations { + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: l, + }, + LastSeen: t, + }) } + } else if v2Output { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: l, + Digest: digest, + CompressorName: compressorName, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, }, - LastSeen: t, + LastSeen: time.Time{}, }) } return candidates @@ -166,7 +181,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// CandidateLocations2 returns a prioritized, limited, number of 
blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
@@ -176,23 +191,24 @@ func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope type
	return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}

-func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	res := []prioritize.CandidateWithTime{}
-	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
	var uncompressedDigest digest.Digest // = ""
	if canSubstitute {
		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
-			if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			if otherDigests != nil {
				for _, d := range otherDigests.Values() {
					if d != primaryDigest && d != uncompressedDigest {
-						res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
+						res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
					}
				}
			}
			if uncompressedDigest != primaryDigest {
-				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
			}
		}
	}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
index 276913d6..2b446a61 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -57,7 +57,7 @@ type cache struct {
	// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
	// That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
-	// the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly
+	// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
	// incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
	// a Close method, so creating a lot of single-use caches could leak data.
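Both cache backends touched by this patch (memory above, SQLite below) apply the same v2Output rules: candidates with unknown compression are dropped for v2 callers, and candidates with no known location are still reported, flagged UnknownLocation with a zero timestamp so they sort last. A toy model of those rules (hypothetical types, not this package's API):

package main

import (
	"fmt"
	"time"
)

type candidate struct {
	digest, compressor, location string
	unknownLocation              bool
	lastSeen                     time.Time
}

const unknownCompression = "unknown" // stand-in for blobinfocache.UnknownCompression

func appendCandidates(out []candidate, digest, compressor string, locations map[string]time.Time, v2Output bool) []candidate {
	if v2Output && compressor == unknownCompression {
		return out // v2 callers cannot use entries without compression info
	}
	if len(locations) == 0 {
		if v2Output {
			// Known blob, unknown location: still usable; the zero lastSeen sorts it last.
			out = append(out, candidate{digest: digest, compressor: compressor, unknownLocation: true})
		}
		return out
	}
	for loc, seen := range locations {
		out = append(out, candidate{digest: digest, compressor: compressor, location: loc, lastSeen: seen})
	}
	return out
}

func main() {
	res := appendCandidates(nil, "sha256:abc", "gzip", nil, true)
	fmt.Printf("%+v\n", res[0])
}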
//
@@ -117,7 +117,7 @@ func (sqc *cache) Open() {
	if sqc.refCount == 0 {
		db, err := rawOpen(sqc.path)
		if err != nil {
-			logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
+			logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
			db = nil // But still increase sqc.refCount, because a .Close() will happen
		}
		sqc.db = db
@@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {

// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
-	// Ideally we should be able to distinguish between read-only and read-write transctions, see the _txlock=exclusive dicussion.
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.

	var zeroRes T // A zero value of T

@@ -249,7 +249,7 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
	//   * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
	//
	//   Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
-	//   join to translate from/to the user-provided digests anyway. If anything, that extra join (potentialy more btree lookups)
+	//   join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
	//   is probably costlier than comparing a few more bytes of data.
	//
	//   Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
@@ -427,11 +427,13 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

-// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
+// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
+// location, and filters out candidates with unknown compression.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
	var rows *sql.Rows
	var err error
-	if requireCompressionInfo {
+	if v2Output {
		rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
			"ON KnownLocations.digest = DigestCompressors.digest "+
			"WHERE transport = ? AND scope = ? 
AND KnownLocations.digest = ?", @@ -448,6 +450,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW } defer rows.Close() + res := []prioritize.CandidateWithTime{} for rows.Next() { var location string var time time.Time @@ -455,7 +458,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := rows.Scan(&location, &time, &compressorName); err != nil { return nil, fmt.Errorf("scanning candidate: %w", err) } - candidates = append(candidates, prioritize.CandidateWithTime{ + res = append(res, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ Digest: digest, CompressorName: compressorName, @@ -467,10 +470,29 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterating through locations: %w", err) } + + if len(res) == 0 && v2Output { + compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) + if err != nil { + return nil, fmt.Errorf("scanning compressorName: %w", err) + } + if found { + res = append(res, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressor, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + LastSeen: time.Time{}, + }) + } + } + candidates = append(candidates, res...) return candidates, nil } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). 
// @@ -483,11 +505,11 @@ func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope type return sqc.candidateLocations(transport, scope, digest, canSubstitute, true) } -func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { +func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { var uncompressedDigest digest.Digest // = "" res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) { res := []prioritize.CandidateWithTime{} - res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo) + res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output) if err != nil { return nil, err } @@ -516,7 +538,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types return nil, err } if otherDigest != primaryDigest && otherDigest != uncompressedDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo) + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output) if err != nil { return nil, err } @@ -527,7 +549,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types } if uncompressedDigest != primaryDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo) + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index b987c580..c61065cb 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -61,78 +62,6 @@ func newAuthPathDefault(path string) authPath { return authPath{path: path, legacyFormat: false} } -// SetCredentials stores the username and password in a location -// appropriate for sys and the users’ configuration. -// A valid key is a repository, a namespace within a registry, or a registry hostname; -// using forms other than just a registry may fail depending on configuration. -// Returns a human-readable description of the location that was updated. -// NOTE: The return value is only intended to be read by humans; its form is not an API, -// it may change (or new forms can be added) any time. -func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) { - isNamespaced, err := validateKey(key) - if err != nil { - return "", err - } - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return "", err - } - - // Make sure to collect all errors. - var multiErr error - for _, helper := range helpers { - var desc string - var err error - switch helper { - // Special-case the built-in helpers for auth files. 
- case sysregistriesv2.AuthenticationFileHelper: - desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - if ch, exists := fileContents.CredHelpers[key]; exists { - if isNamespaced { - return false, "", unsupportedNamespaceErr(ch) - } - desc, err := setCredsInCredHelper(ch, key, username, password) - if err != nil { - return false, "", err - } - return false, desc, nil - } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - fileContents.AuthConfigs[key] = newCreds - return true, "", nil - }) - // External helpers. - default: - if isNamespaced { - err = unsupportedNamespaceErr(helper) - } else { - desc, err = setCredsInCredHelper(helper, key, username, password) - } - } - if err != nil { - multiErr = multierror.Append(multiErr, err) - logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) - continue - } - logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) - return desc, nil - } - return "", multiErr -} - -func unsupportedNamespaceErr(helper string) error { - return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) -} - -// SetAuthentication stores the username and password in the credential helper or file -// See the documentation of SetCredentials for format of "key" -func SetAuthentication(sys *types.SystemContext, key, username, password string) error { - _, err := SetCredentials(sys, key, username, password) - return err -} - // GetAllCredentials returns the registry credentials for all registries stored // in any of the configured credential helpers. func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) { @@ -370,17 +299,79 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) return creds.Username, creds.Password, nil } -// RemoveAuthentication removes credentials for `key` from all possible -// sources such as credential helpers and auth files. +// SetCredentials stores the username and password in a location +// appropriate for sys and the users’ configuration. // A valid key is a repository, a namespace within a registry, or a registry hostname; // using forms other than just a registry may fail depending on configuration. -func RemoveAuthentication(sys *types.SystemContext, key string) error { - isNamespaced, err := validateKey(key) +// Returns a human-readable description of the location that was updated. +// NOTE: The return value is only intended to be read by humans; its form is not an API, +// it may change (or new forms can be added) any time. +func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) { + helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) if err != nil { - return err + return "", err } - helpers, err := sysregistriesv2.CredentialHelpers(sys) + // Make sure to collect all errors. + var multiErr error + for _, helper := range helpers { + var desc string + var err error + switch helper { + // Special-case the built-in helpers for auth files. 
+ case sysregistriesv2.AuthenticationFileHelper: + desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if ch, exists := fileContents.CredHelpers[key]; exists { + if isNamespaced { + return false, "", unsupportedNamespaceErr(ch) + } + desc, err := setCredsInCredHelper(ch, key, username, password) + if err != nil { + return false, "", err + } + return false, desc, nil + } + creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + newCreds := dockerAuthConfig{Auth: creds} + fileContents.AuthConfigs[key] = newCreds + return true, "", nil + }) + // External helpers. + default: + if isNamespaced { + err = unsupportedNamespaceErr(helper) + } else { + desc, err = setCredsInCredHelper(helper, key, username, password) + } + } + if err != nil { + multiErr = multierror.Append(multiErr, err) + logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) + continue + } + logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) + return desc, nil + } + return "", multiErr +} + +func unsupportedNamespaceErr(helper string) error { + return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) +} + +// SetAuthentication stores the username and password in the credential helper or file +// See the documentation of SetCredentials for format of "key" +func SetAuthentication(sys *types.SystemContext, key, username, password string) error { + _, err := SetCredentials(sys, key, username, password) + return err +} + +// RemoveAuthentication removes credentials for `key` from all possible +// sources such as credential helpers and auth files. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. +func RemoveAuthentication(sys *types.SystemContext, key string) error { + helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) if err != nil { return err } @@ -411,7 +402,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { if innerHelper, exists := fileContents.CredHelpers[key]; exists { removeFromCredHelper(innerHelper) } @@ -443,7 +434,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { // RemoveAllAuthentication deletes all the credentials stored in credential // helpers and auth files. func RemoveAllAuthentication(sys *types.SystemContext) error { - helpers, err := sysregistriesv2.CredentialHelpers(sys) + helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false) if err != nil { return err } @@ -454,7 +445,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { switch helper { // Special-case the built-in helper for auth files. 
case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { for registry, helper := range fileContents.CredHelpers { // Helpers in auth files are expected // to exist, so no special treatment @@ -497,6 +488,46 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { return multiErr } +// prepareForEdit processes sys and key (if keyRelevant) to return: +// - a list of credential helpers +// - a function which can be used to edit the JSON file +// - the key value to actually use in credential helpers / JSON +// - a boolean which is true if key is namespaced (and should not be used with credential helpers). +func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) { + var isNamespaced bool + if keyRelevant { + ns, err := validateKey(key) + if err != nil { + return nil, nil, "", false, err + } + isNamespaced = ns + } + + if sys != nil && sys.DockerCompatAuthFilePath != "" { + if sys.AuthFilePath != "" { + return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") + } + if keyRelevant { + if isNamespaced { + return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key) + } + if key == "docker.io" { + key = "https://index.docker.io/v1/" + } + } + + // Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them. + return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil + } + + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return nil, nil, "", false, err + } + + return helpers, modifyJSON, key, isNamespaced, nil +} + func listCredsInCredHelper(credHelper string) (map[string]string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) @@ -513,9 +544,17 @@ func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) { // it exists only to allow testing it with an artificial runtime.GOOS. func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) { if sys != nil { + if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" { + return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") + } if sys.AuthFilePath != "" { return newAuthPathDefault(sys.AuthFilePath), true, nil } + // When reading, we can process auth.json and Docker’s config.json with the same code. + // When writing, prepareForEdit chooses an appropriate jsonEditor implementation. + if sys.DockerCompatAuthFilePath != "" { + return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil + } if sys.LegacyFormatAuthFilePath != "" { return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil } @@ -626,6 +665,86 @@ func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfig return description, nil } +// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and +// writes it back if editor returns true. +// Returns a human-readable description of the file, to be returned by SetCredentials. 
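A caller-side sketch of the new Docker-compatible mode (the DockerCompatAuthFilePath field and SetCredentials are part of this patch; the path and credentials below are made up): setting the field routes writes through modifyDockerConfigJSON, and a "docker.io" key is stored under Docker's legacy index URL.

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{
		// Write a Docker-style config.json instead of containers-auth.json.
		DockerCompatAuthFilePath: "/tmp/example-docker-config.json", // hypothetical path
	}
	desc, err := config.SetCredentials(sys, "docker.io", "exampleuser", "examplepassword")
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials written to:", desc)
}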
+// +// The editor may also return a human-readable description of the updated location; if it is "", +// the file itself is used. +func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { + if sys == nil || sys.DockerCompatAuthFilePath == "" { + return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set") + } + path := sys.DockerCompatAuthFilePath + + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + + // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions. + var rawContents map[string]json.RawMessage + originalBytes, err := os.ReadFile(path) + switch { + case err == nil: + if err := json.Unmarshal(originalBytes, &rawContents); err != nil { + return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err) + } + case errors.Is(err, fs.ErrNotExist): + rawContents = map[string]json.RawMessage{} + default: // err != nil + return "", err + } + + syntheticContents := dockerConfigFile{ + AuthConfigs: map[string]dockerAuthConfig{}, + CredHelpers: map[string]string{}, + } + // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably + // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with + // unexpected case. + if rawAuths, ok := rawContents["auths"]; ok { + // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field + // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials). + // It might make sense to track which entries of "auths" we actually modified, and to not touch any others. + if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil { + return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err) + } + } + if rawCH, ok := rawContents["credHelpers"]; ok { + if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil { + return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err) + + } + } + + updated, description, err := editor(&syntheticContents) + if err != nil { + return "", fmt.Errorf("updating %q: %w", path, err) + } + if updated { + rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t") + if err != nil { + return "", fmt.Errorf("marshaling JSON %q: %w", path, err) + } + rawContents["auths"] = rawAuths + // We never modify syntheticContents.CredHelpers, so we don’t need to update it. 
+ newData, err := json.MarshalIndent(rawContents, "", "\t") + if err != nil { + return "", fmt.Errorf("marshaling JSON %q: %w", path, err) + } + + if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { + return "", fmt.Errorf("writing to file %q: %w", path, err) + } + } + + if description == "" { + description = path + } + return description, nil +} + func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index eeb7c1ef..a15b2b56 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/v5/types" "github.com/manifoldco/promptui" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" "golang.org/x/term" ) @@ -169,7 +170,7 @@ func (r *Resolved) Description() string { // pull errors must equal the amount of pull candidates. func (r *Resolved) FormatPullErrors(pullErrors []error) error { if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) { - pullErrors = append(pullErrors, + pullErrors = append(slices.Clone(pullErrors), fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates", len(r.PullCandidates), len(pullErrors), len(r.PullCandidates))) } diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 56b0d493..c6ec84bd 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -66,7 +66,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { if err != nil { return err } - tlsc.Certificates = append(tlsc.Certificates, cert) + tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 49f7d03c..a55e3405 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" @@ -101,6 +102,8 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, // Resolve the reference's name to an image ID in the store, if there's already // one present with the same name or ID, and return the image. +// +// Returns an error matching ErrNoSuchImage if an image matching ref was not found. 
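The field-preserving technique modifyDockerConfigJSON uses above, reduced to a standalone sketch: keep the whole document as map[string]json.RawMessage, decode and edit only the keys you understand, and marshal just those keys back, so unknown top-level fields survive. Names here are illustrative, not this package's API.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	original := []byte(`{"auths":{"example.com":{"auth":"dXNlcjpwdw=="}},"experimental":"enabled"}`)

	// Keep every top-level key as raw bytes so unknown fields survive the round trip.
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(original, &raw); err != nil {
		panic(err)
	}

	// Decode and edit only the section we understand.
	auths := map[string]map[string]string{}
	if rawAuths, ok := raw["auths"]; ok {
		if err := json.Unmarshal(rawAuths, &auths); err != nil {
			panic(err)
		}
	}
	auths["registry.example.org"] = map[string]string{"auth": "b3RoZXI6cHc="}

	// Re-marshal only the edited section; "experimental" is left untouched.
	updated, err := json.Marshal(auths)
	if err != nil {
		panic(err)
	}
	raw["auths"] = updated
	out, _ := json.MarshalIndent(raw, "", "\t")
	fmt.Println(string(out))
}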
func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
	var loadedImage *storage.Image
	if s.id == "" && s.named != nil {
@@ -283,3 +286,31 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(sys, s)
}
+
+// ResolveReference finds the underlying storage image for a storage.Transport reference.
+// It returns that image, and an updated reference which can be used to refer back to the _same_
+// image again.
+//
+// This matters if the input reference contains a tagged name; the destination of the tag can
+// move in local storage. The updated reference returned by this function contains the resolved
+// image ID, so later uses of that updated reference will either continue to refer to the same
+// image, or fail.
+//
+// Note that it _is_ possible for the later uses to fail, either because the image was removed
+// completely, or because the name used in the reference was untagged (even if the underlying image
+// ID still exists in local storage).
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
+func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) {
+	sref, ok := ref.(*storageReference)
+	if !ok {
+		return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(),
+			transports.ImageName(ref))
+	}
+	clone := *sref // A shallow copy we can update
+	img, err := clone.resolveImage(nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return clone, img, nil
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
index 58ba3ee6..deb500b4 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -48,9 +48,26 @@ type StoreTransport interface {
	GetStoreIfSet() storage.Store
	// GetImage retrieves the image from the transport's store that's named
	// by the reference.
+	// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+	// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+	// can return different images, with no way for the caller to "freeze" the storage.Image identity
+	// without discarding the name entirely.
+	//
+	// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+	// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
	GetImage(types.ImageReference) (*storage.Image, error)
	// GetStoreImage retrieves the image from a specified store that's named
	// by the reference.
+	//
+	// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+	// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+	// can return different images, with no way for the caller to "freeze" the storage.Image identity
+	// without discarding the name entirely.
+	//
+	// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+	//
+	// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+	// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
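A migration sketch for callers of the deprecated getters (the image name is illustrative and error handling is simplified): ResolveReference pins a tag to an image ID, so the returned reference cannot silently start pointing at a different image.

package main

import (
	"errors"
	"fmt"

	istorage "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	ref, err := alltransports.ParseImageName("containers-storage:docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	// Instead of StoreTransport.GetImage / GetStoreImage:
	resolvedRef, img, err := istorage.ResolveReference(ref)
	if errors.Is(err, istorage.ErrNoSuchImage) {
		fmt.Println("image not found")
		return
	}
	if err != nil {
		panic(err)
	}
	// resolvedRef embeds the resolved image ID; later uses refer to the same image or fail.
	fmt.Println(img.ID, resolvedRef.StringWithinTransport())
}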
GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) // ParseStoreReference parses a reference, overriding any store // specification that it may contain. @@ -290,6 +307,15 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc return s.ParseStoreReference(store, reference) } +// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, +// this ignores that ID; and repeated calls of GetStoreImage with the same named reference +// can return different images, with no way for the caller to "freeze" the storage.Image identity +// without discarding the name entirely. +// +// Also, a StoreTransport reference already contains a store, so providing another one is redundant. +// +// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns +// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown. func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() if dref != nil { @@ -306,6 +332,13 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe return nil, storage.ErrImageUnknown } +// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, +// this ignores that ID; and repeated calls of GetStoreImage with the same named reference +// can return different images, with no way for the caller to "freeze" the storage.Image identity +// without discarding the name entirely. +// +// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns +// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown. func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { store, err := s.GetStore() if err != nil { diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 33adb5f1..180a98c5 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -445,7 +445,7 @@ type ImageCloser interface { Close() error } -// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest +// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage type ManifestUpdateOptions struct { LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. EmbeddedDockerReference reference.Named @@ -457,7 +457,7 @@ type ManifestUpdateOptions struct { // ManifestUpdateInformation is a component of ManifestUpdateOptions, named here // only to make writing struct literals possible. type ManifestUpdateInformation struct { - Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) + Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. 
}
@@ -594,6 +594,10 @@ type SystemContext struct {
	// this field is ignored if `AuthFilePath` is set (we favor the newer format);
	// only reading of this data is supported;
	LegacyFormatAuthFilePath string
+	// If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed.
+	// This must not be set if AuthFilePath is set.
+	// Only credentials and credential helpers in this file are processed, not any other configuration in this file.
+	DockerCompatAuthFilePath string
	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
	ArchitectureChoice string
	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 27d034dc..b24ee881 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
	// VersionMajor is for an API incompatible changes
	VersionMajor = 5
	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 28
+	VersionMinor = 29
	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 0
+	VersionPatch = 2
	// VersionDev indicates development branch. Releases will be empty string.
	VersionDev = ""
diff --git a/vendor/github.com/containers/luksy/.cirrus.yml b/vendor/github.com/containers/luksy/.cirrus.yml
new file mode 100644
index 00000000..b639575b
--- /dev/null
+++ b/vendor/github.com/containers/luksy/.cirrus.yml
@@ -0,0 +1,32 @@
+docker_builder:
+  name: CI
+  env:
+    HOME: /root
+    DEBIAN_FRONTEND: noninteractive
+    CIRRUS_LOG_TIMESTAMP: true
+  setup_script: |
+    apt-get -q update
+    apt-get -q install -y bats cryptsetup golang
+    go version
+    make
+  unit_test_script: |
+    go test -timeout 45m -v -cover
+    case $(go env GOARCH) in
+      amd64)
+        otherarch=386;;
+      arm64)
+        otherarch=arm;;
+      mips64)
+        otherarch=mips;;
+      mips64le)
+        otherarch=mipsle;;
+    esac
+    if test -n "$otherarch" ; then
+      echo running unit tests again with GOARCH=$otherarch
+      GOARCH=$otherarch go test -timeout 45m -v -cover
+    fi
+    :
+  defaults_script: |
+    bats -f defaults ./tests
+  aes_script: |
+    bats -f aes ./tests
diff --git a/vendor/github.com/containers/luksy/.dockerignore b/vendor/github.com/containers/luksy/.dockerignore
new file mode 100644
index 00000000..24276303
--- /dev/null
+++ b/vendor/github.com/containers/luksy/.dockerignore
@@ -0,0 +1,2 @@
+lukstool
+lukstool.test
diff --git a/vendor/github.com/containers/luksy/.gitignore b/vendor/github.com/containers/luksy/.gitignore
new file mode 100644
index 00000000..3b735ec4
--- /dev/null
+++ b/vendor/github.com/containers/luksy/.gitignore
@@ -0,0 +1,21 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
diff --git a/vendor/github.com/containers/luksy/Dockerfile b/vendor/github.com/containers/luksy/Dockerfile
new file mode 100644
index 00000000..16ca5c7c
--- /dev/null
+++ b/vendor/github.com/containers/luksy/Dockerfile
@@ -0,0 
+1,7 @@ +FROM registry.fedoraproject.org/fedora +RUN dnf -y install golang make +WORKDIR /go/src/github.com/containers/luksy/ +COPY / /go/src/github.com/containers/luksy/ +RUN make clean all +FROM registry.fedoraproject.org/fedora-minimal +COPY --from=0 /go/src/github.com/containers/luksy/ /usr/local/bin/ diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/LICENSE b/vendor/github.com/containers/luksy/LICENSE similarity index 100% rename from vendor/github.com/container-orchestrated-devices/container-device-interface/LICENSE rename to vendor/github.com/containers/luksy/LICENSE diff --git a/vendor/github.com/containers/luksy/Makefile b/vendor/github.com/containers/luksy/Makefile new file mode 100644 index 00000000..b3563c2e --- /dev/null +++ b/vendor/github.com/containers/luksy/Makefile @@ -0,0 +1,14 @@ +GO = go +BATS = bats + +all: luksy + +luksy: cmd/luksy/*.go *.go + $(GO) build -o luksy$(shell go env GOEXE) ./cmd/luksy + +clean: + $(RM) luksy$(shell go env GOEXE) luksy.test + +test: + $(GO) test -timeout 45m -v -cover + $(BATS) ./tests diff --git a/vendor/github.com/containers/luksy/OWNERS b/vendor/github.com/containers/luksy/OWNERS new file mode 100644 index 00000000..eca8673e --- /dev/null +++ b/vendor/github.com/containers/luksy/OWNERS @@ -0,0 +1,4 @@ +approvers: + - nalind +reviewers: + - nalind diff --git a/vendor/github.com/containers/luksy/README.md b/vendor/github.com/containers/luksy/README.md new file mode 100644 index 00000000..2bf3a438 --- /dev/null +++ b/vendor/github.com/containers/luksy/README.md @@ -0,0 +1,10 @@ +luksy: offline encryption/decryption using LUKS formats [![Cirrus CI Status](https://img.shields.io/cirrus/github/containers/luksy/main)](https://cirrus-ci.com/github/containers/luksy/main) +- +luksy implements encryption and decryption using LUKSv1 and LUKSv2 formats. +Think of it as a clunkier cousin of gzip/bzip2/xz that doesn't actually produce +smaller output than input, but it encrypts, and that's nice. + +* The main goal is to be able to encrypt/decrypt when we don't have access to + the Linux device mapper. Duplicating functions of cryptsetup that it can + perform without accessing the Linux device mapper is not a priority. +* If you can use cryptsetup instead, use cryptsetup instead. diff --git a/vendor/github.com/containers/luksy/decrypt.go b/vendor/github.com/containers/luksy/decrypt.go new file mode 100644 index 00000000..b36c4f50 --- /dev/null +++ b/vendor/github.com/containers/luksy/decrypt.go @@ -0,0 +1,255 @@ +package luksy + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" +) + +// ReaderAtSeekCloser is a combination of io.ReaderAt, io.Seeker, and io.Closer, +// which is all we really need from an encrypted file. +type ReaderAtSeekCloser interface { + io.ReaderAt + io.Seeker + io.Closer +} + +// Decrypt attempts to verify the specified password using information from the +// header and read from the specified file. +// +// Returns a function which will decrypt payload blocks in succession, the size +// of chunks of data that the function expects, the offset in the file where +// the payload begins, and the size of the payload, assuming the payload runs +// to the end of the file. 
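A sketch of how a caller might consume the values documented above (the helper name and flow are hypothetical; only the contract, a decrypt function fed successive blockSize-sized chunks, comes from this patch):

package main

import (
	"fmt"
	"io"
	"os"
)

// decryptAll seeks to the payload and feeds decrypt one blockSize-sized chunk
// at a time; the payload is expected to be a whole number of sectors.
func decryptAll(f *os.File, decrypt func([]byte) ([]byte, error), blockSize int, offset, size int64, w io.Writer) error {
	if _, err := f.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	buf := make([]byte, blockSize)
	for size > 0 {
		n, err := io.ReadFull(f, buf)
		if err != nil && err != io.ErrUnexpectedEOF {
			return err
		}
		plaintext, err := decrypt(buf[:n])
		if err != nil {
			return err
		}
		if size < int64(len(plaintext)) {
			plaintext = plaintext[:size] // trim anything past the declared payload size
		}
		if _, err := w.Write(plaintext); err != nil {
			return err
		}
		size -= int64(n)
	}
	return nil
}

func main() {
	fmt.Println("decryptAll is a sketch; wire it to the Decrypt return values below")
}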
+func (h V1Header) Decrypt(password string, f ReaderAtSeekCloser) (func([]byte) ([]byte, error), int, int64, int64, error) { + size, err := f.Seek(0, io.SeekEnd) + if err != nil { + return nil, -1, -1, -1, err + } + hasher, err := hasherByName(h.HashSpec()) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", h.HashSpec(), err) + } + + activeKeys := 0 + for k := 0; k < v1NumKeys; k++ { + keyslot, err := h.KeySlot(k) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("reading key slot %d: %w", k, err) + } + active, err := keyslot.Active() + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("checking if key slot %d is active: %w", k, err) + } + if !active { + continue + } + activeKeys++ + + passwordDerived := pbkdf2.Key([]byte(password), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher) + striped := make([]byte, h.KeyBytes()*keyslot.Stripes()) + n, err := f.ReadAt(striped, int64(keyslot.KeyMaterialOffset())*V1SectorSize) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %d: %w", k, err) + } + if n != len(striped) { + return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %d: expected %d, got %d", k, len(striped), n) + } + splitKey, err := v1decrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, striped, V1SectorSize, false) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err) + continue + } + mkCandidate, err := afMerge(splitKey, hasher(), int(h.KeyBytes()), int(keyslot.Stripes())) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err) + continue + } + mkcandidateDerived := pbkdf2.Key(mkCandidate, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher) + ivTweak := 0 + decryptStream := func(ciphertext []byte) ([]byte, error) { + plaintext, err := v1decrypt(h.CipherName(), h.CipherMode(), ivTweak, mkCandidate, ciphertext, V1SectorSize, false) + ivTweak += len(ciphertext) / V1SectorSize + return plaintext, err + } + if bytes.Equal(mkcandidateDerived, h.MKDigest()) { + payloadOffset := int64(h.PayloadOffset() * V1SectorSize) + return decryptStream, V1SectorSize, payloadOffset, size - payloadOffset, nil + } + } + if activeKeys == 0 { + return nil, -1, -1, -1, errors.New("no passwords set on LUKS1 volume") + } + return nil, -1, -1, -1, errors.New("decryption error: incorrect password") +} + +// Decrypt attempts to verify the specified password using information from the +// header, JSON block, and read from the specified file. +// +// Returns a function which will decrypt payload blocks in succession, the size +// of chunks of data that the function expects, the offset in the file where +// the payload begins, and the size of the payload, assuming the payload runs +// to the end of the file. 
+func (h V2Header) Decrypt(password string, f ReaderAtSeekCloser, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) { + foundDigests := 0 + for d, digest := range j.Digests { + if digest.Type != "pbkdf2" { + continue + } + if digest.V2JSONDigestPbkdf2 == nil { + return nil, -1, -1, -1, fmt.Errorf("digest %q is corrupt: no pbkdf2 parameters", d) + } + foundDigests++ + if len(digest.Segments) == 0 || len(digest.Digest) == 0 { + continue + } + payloadOffset := int64(-1) + payloadSectorSize := V1SectorSize + payloadEncryption := "" + payloadSize := int64(0) + ivTweak := 0 + for _, segmentID := range digest.Segments { + segment, ok := j.Segments[segmentID] + if !ok { + continue // well, that was misleading + } + if segment.Type != "crypt" { + continue + } + tmp, err := strconv.ParseInt(segment.Offset, 10, 64) + if err != nil { + continue + } + payloadOffset = tmp + if segment.Size == "dynamic" { + size, err := f.Seek(0, io.SeekEnd) + if err != nil { + continue + } + payloadSize = size - payloadOffset + } else { + payloadSize, err = strconv.ParseInt(segment.Size, 10, 64) + if err != nil { + continue + } + } + payloadSectorSize = segment.SectorSize + payloadEncryption = segment.Encryption + ivTweak = segment.IVTweak + break + } + if payloadEncryption == "" { + continue + } + activeKeys := 0 + for k, keyslot := range j.Keyslots { + if keyslot.Priority != nil && *keyslot.Priority == V2JSONKeyslotPriorityIgnore { + continue + } + applicable := true + if len(digest.Keyslots) > 0 { + applicable = false + for i := 0; i < len(digest.Keyslots); i++ { + if k == digest.Keyslots[i] { + applicable = true + break + } + } + } + if !applicable { + continue + } + if keyslot.Type != "luks2" { + continue + } + if keyslot.V2JSONKeyslotLUKS2 == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt", k) + } + if keyslot.V2JSONKeyslotLUKS2.AF.Type != "luks1" { + continue + } + if keyslot.V2JSONKeyslotLUKS2.AF.V2JSONAFLUKS1 == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no AF parameters", k) + } + if keyslot.Area.Type != "raw" { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is not raw", k) + } + if keyslot.Area.KeySize*V2SectorSize < keyslot.KeySize*keyslot.AF.Stripes { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is too small (%d < %d)", k, keyslot.Area.KeySize*V2SectorSize, keyslot.KeySize*keyslot.AF.Stripes) + } + var passwordDerived []byte + switch keyslot.V2JSONKeyslotLUKS2.Kdf.Type { + default: + continue + case "pbkdf2": + if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfPbkdf2 == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no pbkdf2 parameters", k) + } + hasher, err := hasherByName(keyslot.Kdf.Hash) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.Kdf.Hash, err) + } + passwordDerived = pbkdf2.Key([]byte(password), keyslot.Kdf.Salt, keyslot.Kdf.Iterations, keyslot.KeySize, hasher) + case "argon2i": + if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2i parameters", k) + } + passwordDerived = argon2.Key([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize)) + case "argon2id": + if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2id parameters", k) + } + passwordDerived = 
argon2.IDKey([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize)) + } + striped := make([]byte, keyslot.KeySize*keyslot.AF.Stripes) + n, err := f.ReadAt(striped, int64(keyslot.Area.Offset)) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %q: %w", k, err) + } + if n != len(striped) { + return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %q: expected %d, got %d", k, len(striped), n) + } + splitKey, err := v2decrypt(keyslot.Area.Encryption, 0, passwordDerived, striped, V1SectorSize, false) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err) + continue + } + afhasher, err := hasherByName(keyslot.AF.Hash) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.AF.Hash, err) + } + mkCandidate, err := afMerge(splitKey, afhasher(), int(keyslot.KeySize), int(keyslot.AF.Stripes)) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err) + continue + } + digester, err := hasherByName(digest.Hash) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", digest.Hash, err) + } + mkcandidateDerived := pbkdf2.Key(mkCandidate, digest.Salt, digest.Iterations, len(digest.Digest), digester) + decryptStream := func(ciphertext []byte) ([]byte, error) { + plaintext, err := v2decrypt(payloadEncryption, ivTweak, mkCandidate, ciphertext, payloadSectorSize, true) + ivTweak += len(ciphertext) / payloadSectorSize + return plaintext, err + } + if bytes.Equal(mkcandidateDerived, digest.Digest) { + return decryptStream, payloadSectorSize, payloadOffset, payloadSize, nil + } + activeKeys++ + } + if activeKeys == 0 { + return nil, -1, -1, -1, fmt.Errorf("no passwords set on LUKS2 volume for digest %q", d) + } + } + if foundDigests == 0 { + return nil, -1, -1, -1, errors.New("no usable password-verification digests set on LUKS2 volume") + } + return nil, -1, -1, -1, errors.New("decryption error: incorrect password") +} diff --git a/vendor/github.com/containers/luksy/encrypt.go b/vendor/github.com/containers/luksy/encrypt.go new file mode 100644 index 00000000..63b345eb --- /dev/null +++ b/vendor/github.com/containers/luksy/encrypt.go @@ -0,0 +1,421 @@ +package luksy + +import ( + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/google/uuid" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" +) + +// EncryptV1 prepares to encrypt data using one or more passwords and the +// specified cipher (or a default, if the specified cipher is ""). +// +// Returns a fixed LUKSv1 header which contains keying information, a function +// which will encrypt blocks of data in succession, and the size of chunks of +// data that it expects. 
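A usage sketch for the function that follows (inputs are illustrative; the signature is as defined in this patch): write the returned header, then encrypt sector-sized chunks; the tail is padded to a full block because the stream function works on whole sectors.

package main

import (
	"bytes"
	"fmt"

	"github.com/containers/luksy"
)

func main() {
	header, encrypt, blockSize, err := luksy.EncryptV1([]string{"example-password"}, "") // "" selects aes-xts-plain64
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	out.Write(header) // fixed LUKSv1 header with keying information

	plaintext := []byte("hello, world")
	// Pad to a whole number of blocks; the encrypt function expects full sectors.
	padded := make([]byte, (len(plaintext)+blockSize-1)/blockSize*blockSize)
	copy(padded, plaintext)
	ciphertext, err := encrypt(padded)
	if err != nil {
		panic(err)
	}
	out.Write(ciphertext)
	fmt.Println("encrypted image bytes:", out.Len())
}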
+func EncryptV1(password []string, cipher string) ([]byte, func([]byte) ([]byte, error), int, error) { + if len(password) == 0 { + return nil, nil, -1, errors.New("at least one password is required") + } + if len(password) > v1NumKeys { + return nil, nil, -1, fmt.Errorf("attempted to use %d passwords, only %d possible", len(password), v1NumKeys) + } + if cipher == "" { + cipher = "aes-xts-plain64" + } + + salt := make([]byte, v1SaltSize) + n, err := rand.Read(salt) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(salt) { + return nil, nil, -1, errors.New("short read") + } + + cipherSpec := strings.SplitN(cipher, "-", 3) + if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 { + return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher) + } + + var h V1Header + if err := h.SetMagic(V1Magic); err != nil { + return nil, nil, -1, fmt.Errorf("setting magic to v1: %w", err) + } + if err := h.SetVersion(1); err != nil { + return nil, nil, -1, fmt.Errorf("setting version to 1: %w", err) + } + h.SetCipherName(cipherSpec[0]) + h.SetCipherMode(cipherSpec[1] + "-" + cipherSpec[2]) + h.SetHashSpec("sha256") + h.SetKeyBytes(32) + if cipherSpec[1] == "xts" { + h.SetKeyBytes(64) + } + h.SetMKDigestSalt(salt) + h.SetMKDigestIter(V1Stripes) + h.SetUUID(uuid.NewString()) + + mkey := make([]byte, h.KeyBytes()) + n, err = rand.Read(mkey) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(mkey) { + return nil, nil, -1, errors.New("short read") + } + + hasher, err := hasherByName(h.HashSpec()) + if err != nil { + return nil, nil, -1, errors.New("internal error") + } + + mkdigest := pbkdf2.Key(mkey, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher) + h.SetMKDigest(mkdigest) + + headerLength := roundUpToMultiple(v1HeaderStructSize, V1AlignKeyslots) + iterations := IterationsPBKDF2(salt, int(h.KeyBytes()), hasher) + var stripes [][]byte + ksSalt := make([]byte, v1KeySlotSaltLength) + for i := 0; i < v1NumKeys; i++ { + n, err = rand.Read(ksSalt) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(ksSalt) { + return nil, nil, -1, errors.New("short read") + } + var keyslot V1KeySlot + keyslot.SetActive(i < len(password)) + keyslot.SetIterations(uint32(iterations)) + keyslot.SetStripes(V1Stripes) + keyslot.SetKeySlotSalt(ksSalt) + if i < len(password) { + splitKey, err := afSplit(mkey, hasher(), int(h.MKDigestIter())) + if err != nil { + return nil, nil, -1, fmt.Errorf("splitting key: %w", err) + } + passwordDerived := pbkdf2.Key([]byte(password[i]), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher) + striped, err := v1encrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, splitKey, V1SectorSize, false) + if err != nil { + return nil, nil, -1, fmt.Errorf("encrypting split key with password: %w", err) + } + if len(striped) != len(mkey)*int(keyslot.Stripes()) { + return nil, nil, -1, fmt.Errorf("internal error: got %d stripe bytes, expected %d", len(striped), len(mkey)*int(keyslot.Stripes())) + } + stripes = append(stripes, striped) + } + keyslot.SetKeyMaterialOffset(uint32(headerLength / V1SectorSize)) + if err := h.SetKeySlot(i, keyslot); err != nil { + return nil, nil, -1, fmt.Errorf("internal error: setting value for key slot %d: %w", i, err) + } + headerLength += len(mkey) * int(keyslot.Stripes()) + headerLength = roundUpToMultiple(headerLength, V1AlignKeyslots) + } + 
headerLength = roundUpToMultiple(headerLength, V1SectorSize) + + h.SetPayloadOffset(uint32(headerLength / V1SectorSize)) + head := make([]byte, headerLength) + offset := copy(head, h[:]) + offset = roundUpToMultiple(offset, V1AlignKeyslots) + for _, stripe := range stripes { + copy(head[offset:], stripe) + offset = roundUpToMultiple(offset+len(stripe), V1AlignKeyslots) + } + ivTweak := 0 + encryptStream := func(plaintext []byte) ([]byte, error) { + ciphertext, err := v1encrypt(h.CipherName(), h.CipherMode(), ivTweak, mkey, plaintext, V1SectorSize, true) + ivTweak += len(plaintext) / V1SectorSize + return ciphertext, err + } + return head, encryptStream, V1SectorSize, nil +} + +// EncryptV2 prepares to encrypt data using one or more passwords and the +// specified cipher (or a default, if the specified cipher is ""). +// +// Returns a fixed LUKSv2 header which contains keying information, a +// function which will encrypt blocks of data in succession, and the size of +// chunks of data that it expects. +func EncryptV2(password []string, cipher string, payloadSectorSize int) ([]byte, func([]byte) ([]byte, error), int, error) { + if len(password) == 0 { + return nil, nil, -1, errors.New("at least one password is required") + } + if cipher == "" { + cipher = "aes-xts-plain64" + } + cipherSpec := strings.SplitN(cipher, "-", 3) + if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 { + return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher) + } + if payloadSectorSize == 0 { + payloadSectorSize = V2SectorSize + } + switch payloadSectorSize { + default: + return nil, nil, -1, fmt.Errorf("invalid sector size %d", payloadSectorSize) + case 512, 1024, 2048, 4096: + } + + headerSalts := make([]byte, v1SaltSize*3) + n, err := rand.Read(headerSalts) + if err != nil { + return nil, nil, -1, err + } + if n != len(headerSalts) { + return nil, nil, -1, errors.New("short read") + } + hSalt1 := headerSalts[:v1SaltSize] + hSalt2 := headerSalts[v1SaltSize : v1SaltSize*2] + mkeySalt := headerSalts[v1SaltSize*2:] + + roundHeaderSize := func(size int) (int, error) { + switch { + case size < 0x4000: + return 0x4000, nil + case size < 0x8000: + return 0x8000, nil + case size < 0x10000: + return 0x10000, nil + case size < 0x20000: + return 0x20000, nil + case size < 0x40000: + return 0x40000, nil + case size < 0x80000: + return 0x80000, nil + case size < 0x100000: + return 0x100000, nil + case size < 0x200000: + return 0x200000, nil + case size < 0x400000: + return 0x400000, nil + } + return 0, fmt.Errorf("internal error: unsupported header size %d", size) + } + + var h1, h2 V2Header + if err := h1.SetMagic(V2Magic1); err != nil { + return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err) + } + if err := h2.SetMagic(V2Magic2); err != nil { + return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err) + } + if err := h1.SetVersion(2); err != nil { + return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err) + } + if err := h2.SetVersion(2); err != nil { + return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err) + } + h1.SetSequenceID(1) + h2.SetSequenceID(1) + h1.SetLabel("") + h2.SetLabel("") + h1.SetChecksumAlgorithm("sha256") + h2.SetChecksumAlgorithm("sha256") + h1.SetSalt(hSalt1) + h2.SetSalt(hSalt2) + uuidString := uuid.NewString() + h1.SetUUID(uuidString) + h2.SetUUID(uuidString) + h1.SetHeaderOffset(0) + h2.SetHeaderOffset(0) + h1.SetChecksum(nil) + h2.SetChecksum(nil) + + mkey := make([]byte, 32) + if cipherSpec[1] == "xts" { + 
mkey = make([]byte, 64) + } + n, err = rand.Read(mkey) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(mkey) { + return nil, nil, -1, errors.New("short read") + } + + tuningSalt := make([]byte, v1SaltSize) + hasher, err := hasherByName(h1.ChecksumAlgorithm()) + if err != nil { + return nil, nil, -1, errors.New("internal error") + } + iterations := IterationsPBKDF2(tuningSalt, len(mkey), hasher) + timeCost := 16 + threadsCost := 16 + memoryCost := MemoryCostArgon2(tuningSalt, len(mkey), timeCost, threadsCost) + priority := V2JSONKeyslotPriorityNormal + var stripes [][]byte + var keyslots []V2JSONKeyslot + + mdigest := pbkdf2.Key(mkey, mkeySalt, iterations, len(hasher().Sum([]byte{})), hasher) + digest0 := V2JSONDigest{ + Type: "pbkdf2", + Salt: mkeySalt, + Digest: mdigest, + Segments: []string{"0"}, + V2JSONDigestPbkdf2: &V2JSONDigestPbkdf2{ + Hash: h1.ChecksumAlgorithm(), + Iterations: iterations, + }, + } + + for i := range password { + keyslotSalt := make([]byte, v1SaltSize) + n, err := rand.Read(keyslotSalt) + if err != nil { + return nil, nil, -1, err + } + if n != len(keyslotSalt) { + return nil, nil, -1, errors.New("short read") + } + key := argon2.Key([]byte(password[i]), keyslotSalt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(len(mkey))) + split, err := afSplit(mkey, hasher(), V2Stripes) + if err != nil { + return nil, nil, -1, fmt.Errorf("splitting: %w", err) + } + striped, err := v2encrypt(cipher, 0, key, split, V1SectorSize, false) + if err != nil { + return nil, nil, -1, fmt.Errorf("encrypting: %w", err) + } + stripes = append(stripes, striped) + keyslot := V2JSONKeyslot{ + Type: "luks2", + KeySize: len(mkey), + Area: V2JSONArea{ + Type: "raw", + Offset: 10000000, // gets updated later + Size: int64(roundUpToMultiple(len(striped), V2AlignKeyslots)), + V2JSONAreaRaw: &V2JSONAreaRaw{ + Encryption: cipher, + KeySize: len(key), + }, + }, + Priority: &priority, + V2JSONKeyslotLUKS2: &V2JSONKeyslotLUKS2{ + AF: V2JSONAF{ + Type: "luks1", + V2JSONAFLUKS1: &V2JSONAFLUKS1{ + Stripes: V2Stripes, + Hash: h1.ChecksumAlgorithm(), + }, + }, + Kdf: V2JSONKdf{ + Type: "argon2i", + Salt: keyslotSalt, + V2JSONKdfArgon2i: &V2JSONKdfArgon2i{ + Time: timeCost, + Memory: memoryCost, + CPUs: threadsCost, + }, + }, + }, + } + keyslots = append(keyslots, keyslot) + digest0.Keyslots = append(digest0.Keyslots, strconv.Itoa(i)) + } + + segment0 := V2JSONSegment{ + Type: "crypt", + Offset: "10000000", // gets updated later + Size: "dynamic", + V2JSONSegmentCrypt: &V2JSONSegmentCrypt{ + IVTweak: 0, + Encryption: cipher, + SectorSize: payloadSectorSize, + }, + } + + j := V2JSON{ + Config: V2JSONConfig{}, + Keyslots: map[string]V2JSONKeyslot{}, + Digests: map[string]V2JSONDigest{}, + Segments: map[string]V2JSONSegment{}, + Tokens: map[string]V2JSONToken{}, + } +rebuild: + j.Digests["0"] = digest0 + j.Segments["0"] = segment0 + encodedJSON, err := json.Marshal(j) + if err != nil { + return nil, nil, -1, err + } + headerPlusPaddedJsonSize, err := roundHeaderSize(int(V2SectorSize) /* binary header */ + len(encodedJSON) + 1) + if err != nil { + return nil, nil, -1, err + } + if j.Config.JsonSize != headerPlusPaddedJsonSize-V2SectorSize { + j.Config.JsonSize = headerPlusPaddedJsonSize - V2SectorSize + goto rebuild + } + + if h1.HeaderSize() != uint64(headerPlusPaddedJsonSize) { + h1.SetHeaderSize(uint64(headerPlusPaddedJsonSize)) + h2.SetHeaderSize(uint64(headerPlusPaddedJsonSize)) + h1.SetHeaderOffset(0) + 
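+ // The primary header stays at offset 0; the secondary header and its copy of the JSON area are placed immediately after the primary header's padded JSON area.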
h2.SetHeaderOffset(uint64(headerPlusPaddedJsonSize)) + goto rebuild + } + + keyslotsOffset := h2.HeaderOffset() * 2 + maxKeys := len(password) + if maxKeys < 64 { + maxKeys = 64 + } + for i := 0; i < len(password); i++ { + oldOffset := keyslots[i].Area.Offset + keyslots[i].Area.Offset = int64(keyslotsOffset) + int64(roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots))*int64(i) + j.Keyslots[strconv.Itoa(i)] = keyslots[i] + if keyslots[i].Area.Offset != oldOffset { + goto rebuild + } + } + keyslotsSize := roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots) * maxKeys + if j.Config.KeyslotsSize != keyslotsSize { + j.Config.KeyslotsSize = keyslotsSize + goto rebuild + } + + segmentOffsetInt := roundUpToMultiple(int(keyslotsOffset)+j.Config.KeyslotsSize, V2SectorSize) + segmentOffset := strconv.Itoa(segmentOffsetInt) + if segment0.Offset != segmentOffset { + segment0.Offset = segmentOffset + goto rebuild + } + + d1 := hasher() + h1.SetChecksum(nil) + d1.Write(h1[:]) + d1.Write(encodedJSON) + zeropad := make([]byte, headerPlusPaddedJsonSize-len(h1)-len(encodedJSON)) + d1.Write(zeropad) + h1.SetChecksum(d1.Sum(nil)) + d2 := hasher() + h2.SetChecksum(nil) + d2.Write(h2[:]) + d2.Write(encodedJSON) + d2.Write(zeropad) + h2.SetChecksum(d2.Sum(nil)) + + head := make([]byte, segmentOffsetInt) + copy(head, h1[:]) + copy(head[V2SectorSize:], encodedJSON) + copy(head[h2.HeaderOffset():], h2[:]) + copy(head[h2.HeaderOffset()+V2SectorSize:], encodedJSON) + for i := 0; i < len(password); i++ { + iAsString := strconv.Itoa(i) + copy(head[j.Keyslots[iAsString].Area.Offset:], stripes[i]) + } + ivTweak := 0 + encryptStream := func(plaintext []byte) ([]byte, error) { + ciphertext, err := v2encrypt(cipher, ivTweak, mkey, plaintext, payloadSectorSize, true) + ivTweak += len(plaintext) / payloadSectorSize + return ciphertext, err + } + return head, encryptStream, segment0.SectorSize, nil +} diff --git a/vendor/github.com/containers/luksy/encryption.go b/vendor/github.com/containers/luksy/encryption.go new file mode 100644 index 00000000..242bceb3 --- /dev/null +++ b/vendor/github.com/containers/luksy/encryption.go @@ -0,0 +1,572 @@ +package luksy + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "strings" + + "github.com/aead/serpent" + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/ripemd160" + "golang.org/x/crypto/twofish" + "golang.org/x/crypto/xts" +) + +func v1encrypt(cipherName, cipherMode string, ivTweak int, key []byte, plaintext []byte, sectorSize int, bulk bool) ([]byte, error) { + var err error + var newBlockCipher func([]byte) (cipher.Block, error) + ciphertext := make([]byte, len(plaintext)) + + switch cipherName { + case "aes": + newBlockCipher = aes.NewCipher + case "twofish": + newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) } + case "cast5": + newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) } + case "serpent": + newBlockCipher = serpent.NewCipher + default: + return nil, fmt.Errorf("unsupported cipher %s", cipherName) + } + if sectorSize == 0 { + sectorSize = V1SectorSize + } + switch sectorSize { + default: + return nil, fmt.Errorf("invalid sector size %d", sectorSize) + case 512, 1024, 2048, 4096: + } + + switch cipherMode { + case "ecb": + cipher, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed <
len(plaintext); processed += cipher.BlockSize() { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "cbc-plain": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint32(iv0, uint32(ivValue)) + cipher := cipher.NewCBCEncrypter(block, iv0) + cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "cbc-plain64": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint64(iv0, uint64(ivValue)) + cipher := cipher.NewCBCEncrypter(block, iv0) + cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "cbc-essiv:sha256": + hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:") + hasher, err := hasherByName(hasherName) + if err != nil { + return nil, fmt.Errorf("initializing encryption using hash %s: %w", hasherName, err) + } + h := hasher() + h.Write(key) + makeiv, err := newBlockCipher(h.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := (processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + plain0 := make([]byte, makeiv.BlockSize()) + binary.LittleEndian.PutUint64(plain0, uint64(ivValue)) + iv0 := make([]byte, makeiv.BlockSize()) + makeiv.Encrypt(iv0, plain0) + cipher := cipher.NewCBCEncrypter(block, iv0) + cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "xts-plain": + cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + sector = sector % 0x100000000 + cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector) + } + case "xts-plain64": + 
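+ // Same as xts-plain above, except that the sector number used as the tweak is a full 64-bit value instead of being reduced modulo 2^32.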
cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector) + } + default: + return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode) + } + + if err != nil { + return nil, fmt.Errorf("cipher error: %w", err) + } + return ciphertext, nil +} + +func v1decrypt(cipherName, cipherMode string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) { + var err error + var newBlockCipher func([]byte) (cipher.Block, error) + plaintext := make([]byte, len(ciphertext)) + + switch cipherName { + case "aes": + newBlockCipher = aes.NewCipher + case "twofish": + newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) } + case "cast5": + newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) } + case "serpent": + newBlockCipher = serpent.NewCipher + default: + return nil, fmt.Errorf("unsupported cipher %s", cipherName) + } + if sectorSize == 0 { + sectorSize = V1SectorSize + } + switch sectorSize { + default: + return nil, fmt.Errorf("invalid sector size %d", sectorSize) + case 512, 1024, 2048, 4096: + } + + switch cipherMode { + case "ecb": + cipher, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(ciphertext); processed += cipher.BlockSize() { + blockLeft := sectorSize + if processed+blockLeft > len(ciphertext) { + blockLeft = len(ciphertext) - processed + } + cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case "cbc-plain": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint32(iv0, uint32(ivValue)) + cipher := cipher.NewCBCDecrypter(block, iv0) + cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case "cbc-plain64": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint64(iv0, uint64(ivValue)) + cipher := cipher.NewCBCDecrypter(block, iv0) + cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case 
"cbc-essiv:sha256": + hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:") + hasher, err := hasherByName(hasherName) + if err != nil { + return nil, fmt.Errorf("initializing decryption using hash %s: %w", hasherName, err) + } + h := hasher() + h.Write(key) + makeiv, err := newBlockCipher(h.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := (processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + plain0 := make([]byte, makeiv.BlockSize()) + binary.LittleEndian.PutUint64(plain0, uint64(ivValue)) + iv0 := make([]byte, makeiv.BlockSize()) + makeiv.Encrypt(iv0, plain0) + cipher := cipher.NewCBCDecrypter(block, iv0) + cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case "xts-plain": + cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(ciphertext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(ciphertext) { + blockLeft = len(ciphertext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + sector = sector % 0x100000000 + cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector) + } + case "xts-plain64": + cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(ciphertext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(ciphertext) { + blockLeft = len(ciphertext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector) + } + default: + return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode) + } + + if err != nil { + return nil, fmt.Errorf("cipher error: %w", err) + } + return plaintext, nil +} + +func v2encrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) { + var cipherName, cipherMode string + switch { + default: + cipherSpec := strings.SplitN(cipherSuite, "-", 2) + if len(cipherSpec) < 2 { + return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite) + } + cipherName = cipherSpec[0] + cipherMode = cipherSpec[1] + } + return v1encrypt(cipherName, cipherMode, ivTweak, key, ciphertext, sectorSize, bulk) +} + +func v2decrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) { + var cipherName, cipherMode string + switch { + default: + cipherSpec := strings.SplitN(cipherSuite, "-", 2) + if len(cipherSpec) < 2 { + return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite) + } + cipherName = cipherSpec[0] + cipherMode = cipherSpec[1] + } + return v1decrypt(cipherName, cipherMode, 
ivTweak, key, ciphertext, sectorSize, bulk) +} + +func diffuse(key []byte, h hash.Hash) []byte { + sum := make([]byte, len(key)) + counter := uint32(0) + for summed := 0; summed < len(key); summed += h.Size() { + h.Reset() + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], counter) + h.Write(buf[:]) + needed := len(key) - summed + if needed > h.Size() { + needed = h.Size() + } + h.Write(key[summed : summed+needed]) + partial := h.Sum(nil) + copy(sum[summed:summed+needed], partial) + counter++ + } + return sum +} + +func afMerge(splitKey []byte, h hash.Hash, keysize int, stripes int) ([]byte, error) { + if len(splitKey) != keysize*stripes { + return nil, fmt.Errorf("expected %d af bytes, got %d", keysize*stripes, len(splitKey)) + } + d := make([]byte, keysize) + for i := 0; i < stripes-1; i++ { + for j := 0; j < keysize; j++ { + d[j] = d[j] ^ splitKey[i*keysize+j] + } + d = diffuse(d, h) + } + for j := 0; j < keysize; j++ { + d[j] = d[j] ^ splitKey[(stripes-1)*keysize+j] + } + return d, nil +} + +func afSplit(key []byte, h hash.Hash, stripes int) ([]byte, error) { + keysize := len(key) + s := make([]byte, keysize*stripes) + d := make([]byte, keysize) + // the first stripes-1 stripes are random; the last stripe is computed from them and the key + n, err := rand.Read(s[0 : (stripes-1)*keysize]) + if err != nil { + return nil, err + } + if n != (stripes-1)*keysize { + return nil, fmt.Errorf("short read when attempting to read random data: %d < %d", n, (stripes-1)*keysize) + } + for i := 0; i < stripes-1; i++ { + for j := 0; j < keysize; j++ { + d[j] = d[j] ^ s[i*keysize+j] + } + d = diffuse(d, h) + } + for j := 0; j < keysize; j++ { + s[(stripes-1)*keysize+j] = d[j] ^ key[j] + } + return s, nil +} + +func roundUpToMultiple(i, factor int) int { + if i < 0 { + return 0 + } + if factor < 1 { + return i + } + return i + ((factor - (i % factor)) % factor) +} + +func roundDownToMultiple(i, factor int) int { + if i < 0 { + return 0 + } + if factor < 1 { + return i + } + return i - (i % factor) +} + +func hasherByName(name string) (func() hash.Hash, error) { + switch name { + case "sha1": + return sha1.New, nil + case "sha256": + return sha256.New, nil + case "sha512": + return sha512.New, nil + case "ripemd160": + return ripemd160.New, nil + default: + return nil, fmt.Errorf("unsupported digest algorithm %q", name) + } +} + +type wrapper struct { + fn func(plaintext []byte) ([]byte, error) + blockSize int + buf []byte + buffered int + processed int + reader io.Reader + eof bool + writer io.Writer +} + +func (w *wrapper) partialWrite() error { + if w.buffered-w.processed >= w.blockSize { + toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize) + processed, err := w.fn(w.buf[w.processed : w.processed+toProcess]) + if err != nil { + return err + } + nProcessed := copy(w.buf[w.processed:], processed) + w.processed += nProcessed + } + if w.processed >= w.blockSize { + nWritten, err := w.writer.Write(w.buf[:w.processed]) + if err != nil { + return err + } + copy(w.buf, w.buf[nWritten:w.buffered]) + w.buffered -= nWritten + w.processed -= nWritten + if w.processed != 0 { + return fmt.Errorf("short write: %d != %d", nWritten, nWritten+w.processed) + } + } + return nil +} + +func (w *wrapper) Write(buf []byte) (int, error) { + n := 0 + for n < len(buf) { + nBuffered := copy(w.buf[w.buffered:], buf[n:]) + w.buffered += nBuffered + n += nBuffered + if err := w.partialWrite(); err != nil { + return n, err + } + } + return n, nil +} + +func (w *wrapper) Read(buf []byte) (int, error) { + n := 0 + for n < len(buf) { + if !w.eof { + nRead, err := w.reader.Read(w.buf[w.buffered:]) + if err
!= nil { + if !errors.Is(err, io.EOF) { + w.buffered += nRead + return n, err + } + w.eof = true + } + w.buffered += nRead + } + if w.buffered == 0 && w.eof { + return n, io.EOF + } + if w.buffered-w.processed >= w.blockSize { + toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize) + processed, err := w.fn(w.buf[w.processed : w.processed+toProcess]) + if err != nil { + return n, err + } + nProcessed := copy(w.buf[w.processed:], processed) + w.processed += nProcessed + } + nRead := copy(buf[n:], w.buf[:w.processed]) + n += nRead + copy(w.buf, w.buf[nRead:w.buffered]) + w.processed -= nRead + w.buffered -= nRead + if w.buffered-w.processed < w.blockSize { + break + } + } + return n, nil +} + +func (w *wrapper) Close() error { + if w.writer != nil { + if w.buffered%w.blockSize != 0 { + nPadding := w.blockSize - w.buffered%w.blockSize + nWritten, err := w.Write(make([]byte, nPadding)) + if err != nil { + return fmt.Errorf("flushing write: %v", err) + } + if nWritten < nPadding { + return fmt.Errorf("flushing write: %d != %d", nPadding, nWritten) + } + } + } + return nil +} + +// EncryptWriter creates an io.WriteCloser which buffers writes through an +// encryption function, transforming and writing multiples of the blockSize. +// After writing a final block, the returned writer should be closed. +// If only a partial block has been written when Close() is called, a final +// block with its length padded with zero bytes will be transformed and +// written. +func EncryptWriter(fn func(plaintext []byte) ([]byte, error), writer io.Writer, blockSize int) io.WriteCloser { + bufferSize := roundUpToMultiple(1024*1024, blockSize) + return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), writer: writer} +} + +// DecryptReader creates an io.ReadCloser which buffers reads through a +// decryption function, decrypting and returning multiples of the blockSize +// until it reaches the end of the file. When data will no longer be read, the +// returned reader should be closed. +func DecryptReader(fn func(ciphertext []byte) ([]byte, error), reader io.Reader, blockSize int) io.ReadCloser { + bufferSize := roundUpToMultiple(1024*1024, blockSize) + return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), reader: reader} +} diff --git a/vendor/github.com/containers/luksy/luks.go b/vendor/github.com/containers/luksy/luks.go new file mode 100644 index 00000000..c584c5f3 --- /dev/null +++ b/vendor/github.com/containers/luksy/luks.go @@ -0,0 +1,75 @@ +package luksy + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// ReadHeaderOptions can control some of what ReadHeaders() does. +type ReadHeaderOptions struct{} + +// ReadHeaders reads LUKS headers from the specified file, returning either a +// LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on +// which format is detected. 
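+// On success, either the returned V1 header or the returned pair of V2 headers and decoded JSON block will be set, never both.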
+func ReadHeaders(f io.ReaderAt, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) { + var v1 V1Header + var v2a, v2b V2Header + n, err := f.ReadAt(v2a[:], 0) + if err != nil { + return nil, nil, nil, nil, err + } + if n != len(v2a) { + return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n) + } + if n, err = f.ReadAt(v1[:], 0); err != nil { + return nil, nil, nil, nil, err + } + if n != len(v1) { + return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n) + } + if v2a.Magic() != V2Magic1 { + return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in LUKS header (%q)", v2a.Magic()) + } + switch v2a.Version() { // is it a v1 header, or the first v2 header? + case 1: + return &v1, nil, nil, nil, nil + case 2: + size := v2a.HeaderSize() + if size > 0x7fffffffffffffff { + return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for second header") + } + if size < 4096 { + return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for JSON data") + } + if n, err = f.ReadAt(v2b[:], int64(size)); err != nil || n != len(v2b) { + if err == nil && n != len(v2b) { + err = fmt.Errorf("short read: read only %d bytes, should have read %d", n, len(v2b)) + } + return nil, nil, nil, nil, err + } + if v2b.Magic() != V2Magic2 { + return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in second LUKS header (%q)", v2b.Magic()) + } + jsonSize := size - 4096 + buf := make([]byte, jsonSize) + n, err = f.ReadAt(buf[:], 4096) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("internal error: while reading JSON data: %w", err) + } + if n < 0 || uint64(n) != jsonSize { + return nil, nil, nil, nil, fmt.Errorf("internal error: short read while reading JSON data (wanted %d, got %d)", jsonSize, n) + } + var jsonData V2JSON + buf = bytes.TrimRightFunc(buf, func(r rune) bool { return r == 0 }) + if err = json.Unmarshal(buf, &jsonData); err != nil { + return nil, nil, nil, nil, fmt.Errorf("internal error: decoding JSON data: %w", err) + } + if uint64(jsonData.Config.JsonSize) != jsonSize { + return nil, nil, nil, nil, fmt.Errorf("internal error: JSON data size mismatch: (expected %d, used %d)", jsonData.Config.JsonSize, jsonSize) + } + return nil, &v2a, &v2b, &jsonData, nil + } + return nil, nil, nil, nil, fmt.Errorf("error reading LUKS header - magic identifier not found") +} diff --git a/vendor/github.com/containers/luksy/tune.go b/vendor/github.com/containers/luksy/tune.go new file mode 100644 index 00000000..6624f882 --- /dev/null +++ b/vendor/github.com/containers/luksy/tune.go @@ -0,0 +1,55 @@ +package luksy + +import ( + "hash" + "time" + + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" +) + +func durationOf(f func()) time.Duration { + start := time.Now() + f() + return time.Since(start) +} + +func IterationsPBKDF2(salt []byte, keyLen int, h func() hash.Hash) int { + iterations := 2 + var d time.Duration + for d < time.Second { + d = durationOf(func() { + _ = pbkdf2.Key([]byte{}, salt, iterations, keyLen, h) + }) + if d < time.Second/10 { + iterations *= 2 + } else { + return iterations * int(time.Second) / int(d) + } + } + return iterations +} + +func memoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int, kdf func([]byte, []byte, uint32, uint32, uint8, uint32) []byte) int { + memoryCost := 2 + var d time.Duration + for d < time.Second { + d = durationOf(func() { + _ = kdf([]byte{}, salt, uint32(timeCost), uint32(memoryCost), 
uint8(threadsCost), uint32(keyLen)) + }) + if d < time.Second/10 { + memoryCost *= 2 + } else { + return memoryCost * int(float64(time.Second)/float64(d)) + } + } + return memoryCost +} + +func MemoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int) int { + return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.Key) +} + +func MemoryCostArgon2i(salt []byte, keyLen, timeCost, threadsCost int) int { + return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.IDKey) +} diff --git a/vendor/github.com/containers/luksy/v1header.go b/vendor/github.com/containers/luksy/v1header.go new file mode 100644 index 00000000..ded4a61d --- /dev/null +++ b/vendor/github.com/containers/luksy/v1header.go @@ -0,0 +1,321 @@ +package luksy + +import ( + "encoding/binary" + "fmt" + "syscall" +) + +type ( + V1Header [592]uint8 + V1KeySlot [48]uint8 +) + +const ( + // Mostly verbatim from LUKS1 On-Disk Format Specification version 1.2.3 + V1Magic = "LUKS\xba\xbe" + v1MagicStart = 0 + v1MagicLength = 6 + v1VersionStart = v1MagicStart + v1MagicLength + v1VersionLength = 2 + v1CipherNameStart = v1VersionStart + v1VersionLength + v1CipherNameLength = 32 + v1CipherModeStart = v1CipherNameStart + v1CipherNameLength + v1CipherModeLength = 32 + v1HashSpecStart = v1CipherModeStart + v1CipherModeLength + v1HashSpecLength = 32 + v1PayloadOffsetStart = v1HashSpecStart + v1HashSpecLength + v1PayloadOffsetLength = 4 + v1KeyBytesStart = v1PayloadOffsetStart + v1PayloadOffsetLength + v1KeyBytesLength = 4 + v1MKDigestStart = v1KeyBytesStart + v1KeyBytesLength + v1MKDigestLength = v1DigestSize + v1MKDigestSaltStart = v1MKDigestStart + v1MKDigestLength + v1MKDigestSaltLength = v1SaltSize + v1MKDigestIterStart = v1MKDigestSaltStart + v1MKDigestSaltLength + v1MKDigestIterLength = 4 + v1UUIDStart = v1MKDigestIterStart + v1MKDigestIterLength + v1UUIDLength = 40 + v1KeySlot1Start = v1UUIDStart + v1UUIDLength + v1KeySlot1Length = 48 + v1KeySlot2Start = v1KeySlot1Start + v1KeySlot1Length + v1KeySlot2Length = 48 + v1KeySlot3Start = v1KeySlot2Start + v1KeySlot2Length + v1KeySlot3Length = 48 + v1KeySlot4Start = v1KeySlot3Start + v1KeySlot3Length + v1KeySlot4Length = 48 + v1KeySlot5Start = v1KeySlot4Start + v1KeySlot4Length + v1KeySlot5Length = 48 + v1KeySlot6Start = v1KeySlot5Start + v1KeySlot5Length + v1KeySlot6Length = 48 + v1KeySlot7Start = v1KeySlot6Start + v1KeySlot6Length + v1KeySlot7Length = 48 + v1KeySlot8Start = v1KeySlot7Start + v1KeySlot7Length + v1KeySlot8Length = 48 + v1HeaderStructSize = v1KeySlot8Start + v1KeySlot8Length + + v1KeySlotActiveStart = 0 + v1KeySlotActiveLength = 4 + v1KeySlotIterationsStart = v1KeySlotActiveStart + v1KeySlotActiveLength + v1KeySlotIterationsLength = 4 + v1KeySlotSaltStart = v1KeySlotIterationsStart + v1KeySlotIterationsLength + v1KeySlotSaltLength = v1SaltSize + v1KeySlotKeyMaterialOffsetStart = v1KeySlotSaltStart + v1KeySlotSaltLength + v1KeySlotKeyMaterialOffsetLength = 4 + v1KeySlotStripesStart = v1KeySlotKeyMaterialOffsetStart + v1KeySlotKeyMaterialOffsetLength + v1KeySlotStripesLength = 4 + v1KeySlotStructSize = v1KeySlotStripesStart + v1KeySlotStripesLength + + v1DigestSize = 20 + v1SaltSize = 32 + v1NumKeys = 8 + v1KeySlotActiveKeyDisabled = 0x0000dead + v1KeySlotActiveKeyEnabled = 0x00ac71f3 + V1Stripes = 4000 + V1AlignKeyslots = 4096 + V1SectorSize = 512 +) + +func (h V1Header) readu2(offset int) uint16 { + return binary.BigEndian.Uint16(h[offset:]) +} + +func (h V1Header) readu4(offset int) uint32 { + return binary.BigEndian.Uint32(h[offset:]) +} + +func (h 
*V1Header) writeu2(offset int, value uint16) { + binary.BigEndian.PutUint16(h[offset:], value) +} + +func (h *V1Header) writeu4(offset int, value uint32) { + binary.BigEndian.PutUint32(h[offset:], value) +} + +func (h V1Header) Magic() string { + return trimZeroPad(string(h[v1MagicStart : v1MagicStart+v1MagicLength])) +} + +func (h *V1Header) SetMagic(magic string) error { + switch magic { + case V1Magic: + copy(h[v1MagicStart:v1MagicStart+v1MagicLength], []uint8(magic)) + return nil + } + return fmt.Errorf("magic %q not acceptable, only %q is an acceptable magic value: %w", magic, V1Magic, syscall.EINVAL) +} + +func (h V1Header) Version() uint16 { + return h.readu2(v1VersionStart) +} + +func (h *V1Header) SetVersion(version uint16) error { + switch version { + case 1: + h.writeu2(v1VersionStart, version) + return nil + } + return fmt.Errorf("version %d not acceptable, only 1 is an acceptable version: %w", version, syscall.EINVAL) +} + +func (h *V1Header) setZeroString(offset int, value string, length int) { + for len(value) < length { + value = value + "\000" + } + copy(h[offset:offset+length], []uint8(value)) +} + +func (h *V1Header) setInt8(offset int, s []uint8, length int) { + t := make([]byte, length) + copy(t, s) + copy(h[offset:offset+length], t) +} + +func (h V1Header) CipherName() string { + return trimZeroPad(string(h[v1CipherNameStart : v1CipherNameStart+v1CipherNameLength])) +} + +func (h *V1Header) SetCipherName(name string) { + h.setZeroString(v1CipherNameStart, name, v1CipherNameLength) +} + +func (h V1Header) CipherMode() string { + return trimZeroPad(string(h[v1CipherModeStart : v1CipherModeStart+v1CipherModeLength])) +} + +func (h *V1Header) SetCipherMode(mode string) { + h.setZeroString(v1CipherModeStart, mode, v1CipherModeLength) +} + +func (h V1Header) HashSpec() string { + return trimZeroPad(string(h[v1HashSpecStart : v1HashSpecStart+v1HashSpecLength])) +} + +func (h *V1Header) SetHashSpec(spec string) { + h.setZeroString(v1HashSpecStart, spec, v1HashSpecLength) +} + +func (h V1Header) PayloadOffset() uint32 { + return h.readu4(v1PayloadOffsetStart) +} + +func (h *V1Header) SetPayloadOffset(offset uint32) { + h.writeu4(v1PayloadOffsetStart, offset) +} + +func (h V1Header) KeyBytes() uint32 { + return h.readu4(v1KeyBytesStart) +} + +func (h *V1Header) SetKeyBytes(bytes uint32) { + h.writeu4(v1KeyBytesStart, bytes) +} + +func (h *V1Header) KeySlot(slot int) (V1KeySlot, error) { + var ks V1KeySlot + if slot < 0 || slot >= v1NumKeys { + return ks, fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1) + } + switch slot { + case 0: + copy(ks[:], h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length]) + case 1: + copy(ks[:], h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length]) + case 2: + copy(ks[:], h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length]) + case 3: + copy(ks[:], h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length]) + case 4: + copy(ks[:], h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length]) + case 5: + copy(ks[:], h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length]) + case 6: + copy(ks[:], h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length]) + case 7: + copy(ks[:], h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length]) + } + return ks, nil +} + +func (h *V1Header) SetKeySlot(slot int, ks V1KeySlot) error { + if slot < 0 || slot >= v1NumKeys { + return fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1) + } + switch slot { + case 0: + copy(h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length], ks[:]) + case 1: +
copy(h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length], ks[:]) + case 2: + copy(h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length], ks[:]) + case 3: + copy(h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length], ks[:]) + case 4: + copy(h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length], ks[:]) + case 5: + copy(h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length], ks[:]) + case 6: + copy(h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length], ks[:]) + case 7: + copy(h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length], ks[:]) + } + return nil +} + +func (h V1Header) MKDigest() []uint8 { + return dupInt8(h[v1MKDigestStart : v1MKDigestStart+v1MKDigestLength]) +} + +func (h *V1Header) SetMKDigest(digest []uint8) { + h.setInt8(v1MKDigestStart, digest, v1MKDigestLength) +} + +func (h V1Header) MKDigestSalt() []uint8 { + return dupInt8(h[v1MKDigestSaltStart : v1MKDigestSaltStart+v1MKDigestSaltLength]) +} + +func (h *V1Header) SetMKDigestSalt(salt []uint8) { + h.setInt8(v1MKDigestSaltStart, salt, v1MKDigestSaltLength) +} + +func (h V1Header) MKDigestIter() uint32 { + return h.readu4(v1MKDigestIterStart) +} + +func (h *V1Header) SetMKDigestIter(bytes uint32) { + h.writeu4(v1MKDigestIterStart, bytes) +} + +func (h V1Header) UUID() string { + return trimZeroPad(string(h[v1UUIDStart : v1UUIDStart+v1UUIDLength])) +} + +func (h *V1Header) SetUUID(uuid string) { + h.setZeroString(v1UUIDStart, uuid, v1UUIDLength) +} + +func (s V1KeySlot) readu4(offset int) uint32 { + return binary.BigEndian.Uint32(s[offset:]) +} + +func (s *V1KeySlot) writeu4(offset int, value uint32) { + binary.BigEndian.PutUint32(s[offset:], value) +} + +func (s *V1KeySlot) setInt8(offset int, i []uint8, length int) { + for len(i) < length { + i = append(i, 0) + } + copy(s[offset:offset+length], i) +} + +func (s V1KeySlot) Active() (bool, error) { + active := s.readu4(v1KeySlotActiveStart) + switch active { + case v1KeySlotActiveKeyDisabled: + return false, nil + case v1KeySlotActiveKeyEnabled: + return true, nil + } + return false, fmt.Errorf("got invalid active value %#0x: %w", active, syscall.EINVAL) +} + +func (s *V1KeySlot) SetActive(active bool) { + if active { + s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyEnabled) + return + } + s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyDisabled) +} + +func (s V1KeySlot) Iterations() uint32 { + return s.readu4(v1KeySlotIterationsStart) +} + +func (s *V1KeySlot) SetIterations(iterations uint32) { + s.writeu4(v1KeySlotIterationsStart, iterations) +} + +func (s V1KeySlot) KeySlotSalt() []uint8 { + return dupInt8(s[v1KeySlotSaltStart : v1KeySlotSaltStart+v1KeySlotSaltLength]) +} + +func (s *V1KeySlot) SetKeySlotSalt(salt []uint8) { + s.setInt8(v1KeySlotSaltStart, salt, v1KeySlotSaltLength) +} + +func (s V1KeySlot) KeyMaterialOffset() uint32 { + return s.readu4(v1KeySlotKeyMaterialOffsetStart) +} + +func (s *V1KeySlot) SetKeyMaterialOffset(material uint32) { + s.writeu4(v1KeySlotKeyMaterialOffsetStart, material) +} + +func (s V1KeySlot) Stripes() uint32 { + return s.readu4(v1KeySlotStripesStart) +} + +func (s *V1KeySlot) SetStripes(stripes uint32) { + s.writeu4(v1KeySlotStripesStart, stripes) +} diff --git a/vendor/github.com/containers/luksy/v2header.go b/vendor/github.com/containers/luksy/v2header.go new file mode 100644 index 00000000..4f94a05e --- /dev/null +++ b/vendor/github.com/containers/luksy/v2header.go @@ -0,0 +1,203 @@ +package luksy + +import ( + "fmt" + "strings" + "syscall" +) + +type V2Header [4096]uint8 + +const ( + // Mostly verbatim from LUKS2 On-Disk Format Specification
version 1.1.1 + V2Magic1 = V1Magic + V2Magic2 = "SKUL\xba\xbe" + v2MagicStart = 0 + v2MagicLength = 6 + v2VersionStart = v2MagicStart + v2MagicLength + v2VersionLength = 2 + v2HeaderSizeStart = v2VersionStart + v2VersionLength + v2HeaderSizeLength = 8 + v2SequenceIDStart = v2HeaderSizeStart + v2HeaderSizeLength + v2SequenceIDLength = 8 + v2LabelStart = v2SequenceIDStart + v2SequenceIDLength + v2LabelLength = 48 + v2ChecksumAlgorithmStart = v2LabelStart + v2LabelLength + v2ChecksumAlgorithmLength = 32 + v2SaltStart = v2ChecksumAlgorithmStart + v2ChecksumAlgorithmLength + v2SaltLength = 64 + v2UUIDStart = v2SaltStart + v2SaltLength + v2UUIDLength = 40 + v2SubsystemStart = v2UUIDStart + v2UUIDLength + v2SubsystemLength = v2LabelLength + v2HeaderOffsetStart = v2SubsystemStart + v2SubsystemLength + v2HeaderOffsetLength = 8 + v2Padding1Start = v2HeaderOffsetStart + v2HeaderOffsetLength + v2Padding1Length = 184 + v2ChecksumStart = v2Padding1Start + v2Padding1Length + v2ChecksumLength = 64 + v2Padding4096Start = v2ChecksumStart + v2ChecksumLength + v2Padding4096Length = 7 * 512 + v2HeaderStructSize = v2Padding4096Start + v2Padding4096Length + + V2Stripes = 4000 + V2AlignKeyslots = 4096 + V2SectorSize = 4096 +) + +func (h V2Header) Magic() string { + return string(h[v2MagicStart : v2MagicStart+v2MagicLength]) +} + +func (h *V2Header) SetMagic(magic string) error { + switch magic { + case V2Magic1, V2Magic2: + copy(h[v2MagicStart:v2MagicStart+v2MagicLength], []uint8(magic)) + return nil + } + return fmt.Errorf("magic %q not acceptable, only %q and %q are acceptable magic values: %w", magic, V2Magic1, V2Magic2, syscall.EINVAL) +} + +func (h V2Header) readu2(offset int) uint16 { + t := uint16(0) + for i := 0; i < 2; i++ { + t = (t << 8) + uint16(h[offset+i]) + } + return t +} + +func (h V2Header) readu8(offset int) uint64 { + t := uint64(0) + for i := 0; i < 8; i++ { + t = (t << 8) + uint64(h[offset+i]) + } + return t +} + +func (h *V2Header) writeu2(offset int, value uint16) { + t := value + for i := 0; i < 2; i++ { + h[offset+1-i] = uint8(uint64(t) & 0xff) + t >>= 8 + } +} + +func (h *V2Header) writeu8(offset int, value uint64) { + t := value + for i := 0; i < 8; i++ { + h[offset+7-i] = uint8(uint64(t) & 0xff) + t >>= 8 + } +} + +func (h V2Header) Version() uint16 { + return h.readu2(v2VersionStart) +} + +func (h *V2Header) SetVersion(version uint16) error { + switch version { + case 2: + h.writeu2(v2VersionStart, version) + return nil + } + return fmt.Errorf("version %d not acceptable, only 2 is an acceptable version: %w", version, syscall.EINVAL) +} + +func (h V2Header) HeaderSize() uint64 { + return h.readu8(v2HeaderSizeStart) +} + +func (h *V2Header) SetHeaderSize(size uint64) { + h.writeu8(v2HeaderSizeStart, size) +} + +func (h V2Header) SequenceID() uint64 { + return h.readu8(v2SequenceIDStart) +} + +func (h *V2Header) SetSequenceID(id uint64) { + h.writeu8(v2SequenceIDStart, id) +} + +func trimZeroPad(s string) string { + return strings.TrimRightFunc(s, func(r rune) bool { return r == 0 }) +} + +func (h V2Header) Label() string { + return trimZeroPad(string(h[v2LabelStart : v2LabelStart+v2LabelLength])) +} + +func (h *V2Header) setZeroString(offset int, value string, length int) { + for len(value) < length { + value = value + "\000" + } + copy(h[offset:offset+length], []uint8(value)) +} + +func (h *V2Header) SetLabel(label string) { + h.setZeroString(v2LabelStart, label, v2LabelLength) +} + +func (h V2Header) ChecksumAlgorithm() string { + return trimZeroPad(string(h[v2ChecksumAlgorithmStart 
: v2ChecksumAlgorithmStart+v2ChecksumAlgorithmLength])) +} + +func (h *V2Header) SetChecksumAlgorithm(alg string) { + h.setZeroString(v2ChecksumAlgorithmStart, alg, v2ChecksumAlgorithmLength) +} + +func dupInt8(s []uint8) []uint8 { + c := make([]uint8, len(s)) + copy(c, s) + return c +} + +func (h *V2Header) setInt8(offset int, s []uint8, length int) { + t := make([]byte, length) + copy(t, s) + copy(h[offset:offset+length], t) +} + +func (h V2Header) Salt() []uint8 { + return dupInt8(h[v2SaltStart : v2SaltStart+v2SaltLength]) +} + +func (h *V2Header) SetSalt(salt []uint8) { + h.setInt8(v2SaltStart, salt, v2SaltLength) +} + +func (h V2Header) UUID() string { + return trimZeroPad(string(h[v2UUIDStart : v2UUIDStart+v2UUIDLength])) +} + +func (h *V2Header) SetUUID(uuid string) { + h.setZeroString(v2UUIDStart, uuid, v2UUIDLength) +} + +func (h V2Header) Subsystem() string { + return trimZeroPad(string(h[v2SubsystemStart : v2SubsystemStart+v2SubsystemLength])) +} + +func (h *V2Header) SetSubsystem(ss string) { + h.setZeroString(v2SubsystemStart, ss, v2SubsystemLength) +} + +func (h V2Header) HeaderOffset() uint64 { + return h.readu8(v2HeaderOffsetStart) +} + +func (h *V2Header) SetHeaderOffset(o uint64) { + h.writeu8(v2HeaderOffsetStart, o) +} + +func (h V2Header) Checksum() []uint8 { + hasher, err := hasherByName(h.ChecksumAlgorithm()) + if err == nil { + return dupInt8(h[v2ChecksumStart : v2ChecksumStart+hasher().Size()]) + } + return dupInt8(h[v2ChecksumStart : v2ChecksumStart+v2ChecksumLength]) +} + +func (h *V2Header) SetChecksum(sum []uint8) { + h.setInt8(v2ChecksumStart, sum, v2ChecksumLength) +} diff --git a/vendor/github.com/containers/luksy/v2json.go b/vendor/github.com/containers/luksy/v2json.go new file mode 100644 index 00000000..5d7650d3 --- /dev/null +++ b/vendor/github.com/containers/luksy/v2json.go @@ -0,0 +1,157 @@ +package luksy + +type V2JSON struct { + Config V2JSONConfig `json:"config"` + Keyslots map[string]V2JSONKeyslot `json:"keyslots"` + Digests map[string]V2JSONDigest `json:"digests"` + Segments map[string]V2JSONSegment `json:"segments"` + Tokens map[string]V2JSONToken `json:"tokens"` +} + +type V2JSONKeyslotPriority int + +func (p V2JSONKeyslotPriority) String() string { + switch p { + case V2JSONKeyslotPriorityIgnore: + return "ignore" + case V2JSONKeyslotPriorityNormal: + return "normal" + case V2JSONKeyslotPriorityHigh: + return "high" + } + return "unknown" +} + +const ( + V2JSONKeyslotPriorityIgnore = V2JSONKeyslotPriority(0) + V2JSONKeyslotPriorityNormal = V2JSONKeyslotPriority(1) + V2JSONKeyslotPriorityHigh = V2JSONKeyslotPriority(2) +) + +type V2JSONKeyslot struct { + Type string `json:"type"` + KeySize int `json:"key_size"` + Area V2JSONArea `json:"area"` + Priority *V2JSONKeyslotPriority `json:"priority,omitempty"` + *V2JSONKeyslotLUKS2 // type = "luks2" + *V2JSONKeyslotReencrypt // type = "reencrypt" +} + +type V2JSONKeyslotLUKS2 struct { + AF V2JSONAF `json:"af"` + Kdf V2JSONKdf `json:"kdf"` +} + +type V2JSONKeyslotReencrypt struct { + Mode string `json:"mode"` // only "reencrypt", "encrypt", "decrypt" + Direction string `json:"direction"` // only "forward", "backward" +} + +type V2JSONArea struct { + Type string `json:"type"` // only "raw", "none", "journal", "checksum", "datashift", "datashift-journal", "datashift-checksum" + Offset int64 `json:"offset,string"` + Size int64 `json:"size,string"` + *V2JSONAreaRaw // type = "raw" + *V2JSONAreaChecksum // type = "checksum" + *V2JSONAreaDatashift // type = "datashift" + *V2JSONAreaDatashiftChecksum // type = 
"datashift-checksum" +} + +type V2JSONAreaRaw struct { + Encryption string `json:"encryption"` + KeySize int `json:"key_size"` +} + +type V2JSONAreaChecksum struct { + Hash string `json:"hash"` + SectorSize int `json:"sector_size"` +} + +type V2JSONAreaDatashift struct { + ShiftSize int `json:"shift_size,string"` +} + +type V2JSONAreaDatashiftChecksum struct { + V2JSONAreaChecksum + V2JSONAreaDatashift +} + +type V2JSONAF struct { + Type string `json:"type"` // "luks1" + *V2JSONAFLUKS1 // type == "luks1" +} + +type V2JSONAFLUKS1 struct { + Stripes int `json:"stripes"` // 4000 + Hash string `json:"hash"` // "sha256" +} + +type V2JSONKdf struct { + Type string `json:"type"` + Salt []byte `json:"salt"` + *V2JSONKdfPbkdf2 // type = "pbkdf2" + *V2JSONKdfArgon2i // type = "argon2i" or type = "argon2id" +} + +type V2JSONKdfPbkdf2 struct { + Hash string `json:"hash"` + Iterations int `json:"iterations"` +} + +type V2JSONKdfArgon2i struct { + Time int `json:"time"` + Memory int `json:"memory"` + CPUs int `json:"cpus"` +} + +type V2JSONSegment struct { + Type string `json:"type"` // only "linear", "crypt" + Offset string `json:"offset"` + Size string `json:"size"` // numeric value or "dynamic" + Flags []string `json:"flags,omitempty"` + *V2JSONSegmentCrypt `json:",omitempty"` // type = "crypt" +} + +type V2JSONSegmentCrypt struct { + IVTweak int `json:"iv_tweak,string"` + Encryption string `json:"encryption"` + SectorSize int `json:"sector_size"` // 512 or 1024 or 2048 or 4096 + Integrity *V2JSONSegmentIntegrity `json:"integrity,omitempty"` +} + +type V2JSONSegmentIntegrity struct { + Type string `json:"type"` + JournalEncryption string `json:"journal_encryption"` + JournalIntegrity string `json:"journal_integrity"` +} + +type V2JSONDigest struct { + Type string `json:"type"` + Keyslots []string `json:"keyslots"` + Segments []string `json:"segments"` + Salt []byte `json:"salt"` + Digest []byte `json:"digest"` + *V2JSONDigestPbkdf2 // type == "pbkdf2" +} + +type V2JSONDigestPbkdf2 struct { + Hash string `json:"hash"` + Iterations int `json:"iterations"` +} + +type V2JSONConfig struct { + JsonSize int `json:"json_size,string"` + KeyslotsSize int `json:"keyslots_size,string,omitempty"` + Flags []string `json:"flags,omitempty"` // one or more of "allow-discards", "same-cpu-crypt", "submit-from-crypt-cpus", "no-journal", "no-read-workqueue", "no-write-workqueue" + Requirements []string `json:"requirements,omitempty"` +} + +type V2JSONToken struct { + Type string `json:"type"` // "luks2-keyring" + Keyslots []string `json:"keyslots,omitempty"` + *V2JSONTokenLUKS2Keyring // type == "luks2-keyring" +} + +type V2JSONTokenLUKS2Keyring struct { + KeyDescription string `json:"key_description"` +} diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go deleted file mode 100644 index b4f0e4d3..00000000 --- a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pkcs11config - -import ( - "errors" - "fmt" - "os" - "path" - - "github.com/containers/ocicrypt/crypto/pkcs11" - "gopkg.in/yaml.v3" -) - -// OcicryptConfig represents the format of an imgcrypt.conf config file -type OcicryptConfig struct { - Pkcs11Config pkcs11.Pkcs11Config `yaml:"pkcs11"` -} - -const CONFIGFILE = "ocicrypt.conf" -const ENVVARNAME = "OCICRYPT_CONFIG" - -// parseConfigFile parses a configuration file; it is not an error if the configuration file does -// not exist, so no error is returned. -// A config file may look like this: -// module-directories: -// - /usr/lib64/pkcs11/ -// - /usr/lib/pkcs11/ -// allowed-module-paths: -// - /usr/lib64/pkcs11/ -// - /usr/lib/pkcs11/ -func parseConfigFile(filename string) (*OcicryptConfig, error) { - // a non-existent config file is not an error - _, err := os.Stat(filename) - if os.IsNotExist(err) { - return nil, nil - } - - data, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - - ic := &OcicryptConfig{} - err = yaml.Unmarshal(data, ic) - return ic, err -} - -// getConfiguration tries to read the configuration file at the following locations -// 1) ${OCICRYPT_CONFIG} == "internal": use internal default allow-all policy -// 2) ${OCICRYPT_CONFIG} -// 3) ${XDG_CONFIG_HOME}/ocicrypt-pkcs11.conf -// 4) ${HOME}/.config/ocicrypt-pkcs11.conf -// 5) /etc/ocicrypt-pkcs11.conf -// If no configuration file could be found or read a null pointer is returned -func getConfiguration() (*OcicryptConfig, error) { - filename := os.Getenv(ENVVARNAME) - if len(filename) > 0 { - if filename == "internal" { - return getDefaultCryptoConfigOpts() - } - ic, err := parseConfigFile(filename) - if err != nil || ic != nil { - return ic, err - } - } - envvar := os.Getenv("XDG_CONFIG_HOME") - if len(envvar) > 0 { - ic, err := parseConfigFile(path.Join(envvar, CONFIGFILE)) - if err != nil || ic != nil { - return ic, err - } - } - envvar = os.Getenv("HOME") - if len(envvar) > 0 { - ic, err := parseConfigFile(path.Join(envvar, ".config", CONFIGFILE)) - if err != nil || ic != nil { - return ic, err - } - } - return parseConfigFile(path.Join("etc", CONFIGFILE)) -} - -// getDefaultCryptoConfigOpts returns default crypto config opts needed for pkcs11 module access -func getDefaultCryptoConfigOpts() (*OcicryptConfig, error) { - mdyaml := pkcs11.GetDefaultModuleDirectoriesYaml("") - config := fmt.Sprintf("module-directories:\n"+ - "%s"+ - "allowed-module-paths:\n"+ - "%s", mdyaml, mdyaml) - p11conf, err := pkcs11.ParsePkcs11ConfigFile([]byte(config)) - return &OcicryptConfig{ - Pkcs11Config: *p11conf, - }, err -} - -// GetUserPkcs11Config gets the user's Pkcs11Conig either from a configuration file or if none is -// found the default ones are returned -func GetUserPkcs11Config() (*pkcs11.Pkcs11Config, error) { - fmt.Print("Note: pkcs11 support is currently experimental\n") - ic, err := getConfiguration() - if err != nil { - return &pkcs11.Pkcs11Config{}, err - } - if ic == nil { - return &pkcs11.Pkcs11Config{}, errors.New("No ocicrypt config file was found") - } - return &ic.Pkcs11Config, nil -} diff --git a/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go deleted file mode 100644 index 18f4fa92..00000000 --- a/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go +++ /dev/null @@ -1,377 +0,0 @@ -package helpers - -import ( - "errors" - "fmt" - "os" - 
"strconv" - "strings" - - "github.com/containers/ocicrypt" - encconfig "github.com/containers/ocicrypt/config" - "github.com/containers/ocicrypt/config/pkcs11config" - "github.com/containers/ocicrypt/crypto/pkcs11" - encutils "github.com/containers/ocicrypt/utils" -) - -// processRecipientKeys sorts the array of recipients by type. Recipients may be either -// x509 certificates, public keys, or PGP public keys identified by email address or name -func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { - var ( - gpgRecipients [][]byte - pubkeys [][]byte - x509s [][]byte - pkcs11Pubkeys [][]byte - pkcs11Yamls [][]byte - keyProviders [][]byte - ) - - for _, recipient := range recipients { - - idx := strings.Index(recipient, ":") - if idx < 0 { - return nil, nil, nil, nil, nil, nil, errors.New("Invalid recipient format") - } - - protocol := recipient[:idx] - value := recipient[idx+1:] - - switch protocol { - case "pgp": - gpgRecipients = append(gpgRecipients, []byte(value)) - - case "jwe": - tmp, err := os.ReadFile(value) - if err != nil { - return nil, nil, nil, nil, nil, nil, fmt.Errorf("Unable to read file: %w", err) - } - if !encutils.IsPublicKey(tmp) { - return nil, nil, nil, nil, nil, nil, errors.New("File provided is not a public key") - } - pubkeys = append(pubkeys, tmp) - - case "pkcs7": - tmp, err := os.ReadFile(value) - if err != nil { - return nil, nil, nil, nil, nil, nil, fmt.Errorf("Unable to read file: %w", err) - } - if !encutils.IsCertificate(tmp) { - return nil, nil, nil, nil, nil, nil, errors.New("File provided is not an x509 cert") - } - x509s = append(x509s, tmp) - - case "pkcs11": - tmp, err := os.ReadFile(value) - if err != nil { - return nil, nil, nil, nil, nil, nil, fmt.Errorf("Unable to read file: %w", err) - } - if encutils.IsPkcs11PublicKey(tmp) { - pkcs11Yamls = append(pkcs11Yamls, tmp) - } else if encutils.IsPublicKey(tmp) { - pkcs11Pubkeys = append(pkcs11Pubkeys, tmp) - } else { - return nil, nil, nil, nil, nil, nil, errors.New("Provided file is not a public key") - } - - case "provider": - keyProviders = append(keyProviders, []byte(value)) - - default: - return nil, nil, nil, nil, nil, nil, errors.New("Provided protocol not recognized") - } - } - return gpgRecipients, pubkeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProviders, nil -} - -// processx509Certs processes x509 certificate files -func processx509Certs(keys []string) ([][]byte, error) { - var x509s [][]byte - for _, key := range keys { - fileName := strings.Split(key, ":")[0] - if _, err := os.Stat(fileName); os.IsNotExist(err) { - continue - } - tmp, err := os.ReadFile(fileName) - if err != nil { - return nil, fmt.Errorf("Unable to read file: %w", err) - } - if !encutils.IsCertificate(tmp) { - continue - } - x509s = append(x509s, tmp) - - } - return x509s, nil -} - -// processPwdString process a password that may be in any of the following formats: -// - file= -// - pass= -// - fd= -// - -func processPwdString(pwdString string) ([]byte, error) { - if strings.HasPrefix(pwdString, "file=") { - return os.ReadFile(pwdString[5:]) - } else if strings.HasPrefix(pwdString, "pass=") { - return []byte(pwdString[5:]), nil - } else if strings.HasPrefix(pwdString, "fd=") { - fdStr := pwdString[3:] - fd, err := strconv.Atoi(fdStr) - if err != nil { - return nil, fmt.Errorf("could not parse file descriptor %s: %w", fdStr, err) - } - f := os.NewFile(uintptr(fd), "pwdfile") - if f == nil { - return nil, fmt.Errorf("%s is not a valid file descriptor", fdStr) - } 
- defer f.Close() - pwd := make([]byte, 64) - n, err := f.Read(pwd) - if err != nil { - return nil, fmt.Errorf("could not read from file descriptor: %w", err) - } - return pwd[:n], nil - } - return []byte(pwdString), nil -} - -// processPrivateKeyFiles sorts the different types of private key files; private key files may either be -// private keys or GPG private key ring files. The private key files may include the password for the -// private key and take any of the following forms: -// - -// - :file= -// - :pass= -// - :fd= -// - : -// - keyprovider:<...> -func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { - var ( - gpgSecretKeyRingFiles [][]byte - gpgSecretKeyPasswords [][]byte - privkeys [][]byte - privkeysPasswords [][]byte - pkcs11Yamls [][]byte - keyProviders [][]byte - err error - ) - // keys needed for decryption in case of adding a recipient - for _, keyfileAndPwd := range keyFilesAndPwds { - var password []byte - - // treat "provider" protocol separately - if strings.HasPrefix(keyfileAndPwd, "provider:") { - keyProviders = append(keyProviders, []byte(keyfileAndPwd[len("provider:"):])) - continue - } - parts := strings.Split(keyfileAndPwd, ":") - if len(parts) == 2 { - password, err = processPwdString(parts[1]) - if err != nil { - return nil, nil, nil, nil, nil, nil, err - } - } - - keyfile := parts[0] - tmp, err := os.ReadFile(keyfile) - if err != nil { - return nil, nil, nil, nil, nil, nil, err - } - isPrivKey, err := encutils.IsPrivateKey(tmp, password) - if encutils.IsPasswordError(err) { - return nil, nil, nil, nil, nil, nil, err - } - - if encutils.IsPkcs11PrivateKey(tmp) { - pkcs11Yamls = append(pkcs11Yamls, tmp) - } else if isPrivKey { - privkeys = append(privkeys, tmp) - privkeysPasswords = append(privkeysPasswords, password) - } else if encutils.IsGPGPrivateKeyRing(tmp) { - gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp) - gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password) - } else { - // ignore if file is not recognized, so as not to error if additional - // metadata/cert files exists - continue - } - } - return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, pkcs11Yamls, keyProviders, nil -} - -// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary -// information to perform decryption from command line options. -func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig.CryptoConfig, error) { - ccs := []encconfig.CryptoConfig{} - - // x509 cert is needed for PKCS7 decryption - _, _, x509s, _, _, _, err := processRecipientKeys(decRecipients) - if err != nil { - return encconfig.CryptoConfig{}, err - } - - // x509 certs can also be passed in via keys - x509FromKeys, err := processx509Certs(keys) - if err != nil { - return encconfig.CryptoConfig{}, err - } - x509s = append(x509s, x509FromKeys...) - - gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, pkcs11Yamls, keyProviders, err := processPrivateKeyFiles(keys) - if err != nil { - return encconfig.CryptoConfig{}, err - } - - if len(gpgSecretKeyRingFiles) > 0 { - gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, gpgCc) - } - - /* TODO: Add in GPG client query for secret keys in the future. 
- _, err = createGPGClient(context) - gpgInstalled := err == nil - if gpgInstalled { - if len(gpgSecretKeyRingFiles) == 0 && len(privKeys) == 0 && len(pkcs11Yamls) == 0 && len(keyProviders) == 0 && descs != nil { - // Get pgp private keys from keyring only if no private key was passed - gpgPrivKeys, gpgPrivKeyPasswords, err := getGPGPrivateKeys(context, gpgSecretKeyRingFiles, descs, true) - if err != nil { - return encconfig.CryptoConfig{}, err - } - - gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeyPasswords) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, gpgCc) - - } else if len(gpgSecretKeyRingFiles) > 0 { - gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, gpgCc) - - } - } - */ - - if len(x509s) > 0 { - x509sCc, err := encconfig.DecryptWithX509s(x509s) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, x509sCc) - } - if len(privKeys) > 0 { - privKeysCc, err := encconfig.DecryptWithPrivKeys(privKeys, privKeysPasswords) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, privKeysCc) - } - if len(pkcs11Yamls) > 0 { - p11conf, err := pkcs11config.GetUserPkcs11Config() - if err != nil { - return encconfig.CryptoConfig{}, err - } - pkcs11PrivKeysCc, err := encconfig.DecryptWithPkcs11Yaml(p11conf, pkcs11Yamls) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, pkcs11PrivKeysCc) - } - if len(keyProviders) > 0 { - keyProviderCc, err := encconfig.DecryptWithKeyProvider(keyProviders) - if err != nil { - return encconfig.CryptoConfig{}, err - } - ccs = append(ccs, keyProviderCc) - } - return encconfig.CombineCryptoConfigs(ccs), nil -} - -// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys -func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) { - var decryptCc *encconfig.CryptoConfig - ccs := []encconfig.CryptoConfig{} - if len(keys) > 0 { - dcc, err := CreateDecryptCryptoConfig(keys, []string{}) - if err != nil { - return encconfig.CryptoConfig{}, err - } - decryptCc = &dcc - ccs = append(ccs, dcc) - } - - if len(recipients) > 0 { - gpgRecipients, pubKeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProvider, err := processRecipientKeys(recipients) - if err != nil { - return encconfig.CryptoConfig{}, err - } - encryptCcs := []encconfig.CryptoConfig{} - - // Create GPG client with guessed GPG version and default homedir - gpgClient, err := ocicrypt.NewGPGClient("", "") - gpgInstalled := err == nil - if len(gpgRecipients) > 0 && gpgInstalled { - gpgPubRingFile, err := gpgClient.ReadGPGPubRingFile() - if err != nil { - return encconfig.CryptoConfig{}, err - } - - gpgCc, err := encconfig.EncryptWithGpg(gpgRecipients, gpgPubRingFile) - if err != nil { - return encconfig.CryptoConfig{}, err - } - encryptCcs = append(encryptCcs, gpgCc) - } - - // Create Encryption Crypto Config - if len(x509s) > 0 { - pkcs7Cc, err := encconfig.EncryptWithPkcs7(x509s) - if err != nil { - return encconfig.CryptoConfig{}, err - } - encryptCcs = append(encryptCcs, pkcs7Cc) - } - if len(pubKeys) > 0 { - jweCc, err := encconfig.EncryptWithJwe(pubKeys) - if err != nil { - return encconfig.CryptoConfig{}, err - } - encryptCcs = append(encryptCcs, jweCc) - } - var p11conf *pkcs11.Pkcs11Config - if len(pkcs11Yamls) > 0 || len(pkcs11Pubkeys) > 0 { - p11conf, err = 
pkcs11config.GetUserPkcs11Config() - if err != nil { - return encconfig.CryptoConfig{}, err - } - pkcs11Cc, err := encconfig.EncryptWithPkcs11(p11conf, pkcs11Pubkeys, pkcs11Yamls) - if err != nil { - return encconfig.CryptoConfig{}, err - } - encryptCcs = append(encryptCcs, pkcs11Cc) - } - - if len(keyProvider) > 0 { - keyProviderCc, err := encconfig.EncryptWithKeyProvider(keyProvider) - if err != nil { - return encconfig.CryptoConfig{}, err - } - encryptCcs = append(encryptCcs, keyProviderCc) - } - ecc := encconfig.CombineCryptoConfigs(encryptCcs) - if decryptCc != nil { - ecc.EncryptConfig.AttachDecryptConfig(decryptCc.DecryptConfig) - } - ccs = append(ccs, ecc) - } - - if len(ccs) > 0 { - return encconfig.CombineCryptoConfigs(ccs), nil - } - return encconfig.CryptoConfig{}, nil -} diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 00c69d7f..c41dd5da 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-38" + FEDORA_NAME: "fedora-39" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20230816t191118z-f38f37d13" + IMAGE_SUFFIX: "c20231004t194547z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -113,8 +113,6 @@ debian_testing_task: &debian_testing TEST_DRIVER: "fuse-overlay-whiteout" - env: TEST_DRIVER: "btrfs" - - env: - TEST_DRIVER: "zfs" lint_task: diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 7ef40656..ba0a7191 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.50.2 +1.51.0 diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 1fb04dc3..ab32d652 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -189,6 +189,9 @@ type DriverWithDifferOutput struct { BigData map[string][]byte TarSplit []byte TOCDigest digest.Digest + // Artifacts is a collection of additional artifacts + // generated by the differ that the storage driver can use. + Artifacts map[string]interface{} } type DifferOutputFormat int diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 60980994..d8139f65 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -275,3 +275,36 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { }() return true, nil } + +// supportsDataOnlyLayers checks if the kernel supports mounting an overlay file system +// that uses data-only layers.
+func supportsDataOnlyLayers(home string) (bool, error) { + layerDir, err := os.MkdirTemp(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerDirDataOnly := filepath.Join(layerDir, "lower-data") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) + _ = idtools.MkdirAs(workDir, 0o700, 0, 0) + + opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go index 5cdbcff6..347e4d35 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go @@ -11,7 +11,7 @@ func composeFsSupported() bool { return false } -func generateComposeFsBlob(toc []byte, composefsDir string) error { +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { return fmt.Errorf("composefs is not supported") } @@ -19,6 +19,6 @@ func mountComposefsBlob(dataDir, mountPoint string) error { return fmt.Errorf("composefs is not supported") } -func enableVerityRecursive(path string) error { - return fmt.Errorf("composefs is not supported") +func enableVerityRecursive(path string) (map[string]string, error) { + return nil, fmt.Errorf("composefs is not supported") } diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go index aaf76913..26dd3686 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go @@ -4,7 +4,6 @@ package overlay import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -16,6 +15,7 @@ import ( "syscall" "unsafe" + "github.com/containers/storage/pkg/chunked/dump" "github.com/containers/storage/pkg/loopback" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -29,7 +29,7 @@ var ( func getComposeFsHelper() (string, error) { composeFsHelperOnce.Do(func() { - composeFsHelperPath, composeFsHelperErr = exec.LookPath("composefs-from-json") + composeFsHelperPath, composeFsHelperErr = exec.LookPath("mkcomposefs") }) return composeFsHelperPath, composeFsHelperErr } @@ -53,7 +53,23 @@ func enableVerity(description string, fd int) error { return nil } -func enableVerityRecursive(path string) error { +type verityDigest struct { + Fsv unix.FsverityDigest + Buf [64]byte +} + +func measureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return 
fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} + +func enableVerityRecursive(root string) (map[string]string, error) { + digests := make(map[string]string) walkFn := func(path string, d fs.DirEntry, err error) error { if err != nil { return err @@ -71,29 +87,47 @@ func enableVerityRecursive(path string) error { if err := enableVerity(path, int(f.Fd())); err != nil { return err } + + verity, err := measureVerity(path, int(f.Fd())) + if err != nil { + return err + } + + relPath, err := filepath.Rel(root, path) + if err != nil { + return err + } + + digests[relPath] = verity return nil } - return filepath.WalkDir(path, walkFn) + err := filepath.WalkDir(root, walkFn) + return digests, err } func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } -func generateComposeFsBlob(toc []byte, composefsDir string) error { +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { if err := os.MkdirAll(composefsDir, 0o700); err != nil { return err } + dumpReader, err := dump.GenerateDump(toc, verityDigests) + if err != nil { + return err + } + destFile := getComposefsBlob(composefsDir) writerJson, err := getComposeFsHelper() if err != nil { - return fmt.Errorf("failed to find composefs-from-json: %w", err) + return fmt.Errorf("failed to find mkcomposefs: %w", err) } fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644) if err != nil { - return fmt.Errorf("failed to open output file: %w", err) + return fmt.Errorf("failed to open output file %q: %w", destFile, err) } outFd := os.NewFile(uintptr(fd), "outFd") @@ -109,10 +143,10 @@ func generateComposeFsBlob(toc []byte, composefsDir string) error { // a scope to close outFd before setting fsverity on the read-only fd. defer outFd.Close() - cmd := exec.Command(writerJson, "--format=erofs", "--out=/proc/self/fd/3", "/proc/self/fd/0") + cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3") cmd.ExtraFiles = []*os.File{outFd} cmd.Stderr = os.Stderr - cmd.Stdin = bytes.NewReader(toc) + cmd.Stdin = dumpReader if err := cmd.Run(); err != nil { return fmt.Errorf("failed to convert json to erofs: %w", err) } @@ -166,7 +200,7 @@ func hasACL(path string) (bool, error) { func mountComposefsBlob(dataDir, mountPoint string) error { blobFile := getComposefsBlob(dataDir) - loop, err := loopback.AttachLoopDevice(blobFile) + loop, err := loopback.AttachLoopDeviceRO(blobFile) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 33e60b11..8829e55e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -141,14 +141,27 @@ func mountOverlayFromMain() { // the new value for the list of lowers, because it's shorter. 
if lowerv != "" { lowers := strings.Split(lowerv, ":") - for i := range lowers { - lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0) + var newLowers []string + dataOnly := false + for _, lowerPath := range lowers { + if lowerPath == "" { + dataOnly = true + continue + } + lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0) if err != nil { fatal(err) } - lowers[i] = fmt.Sprintf("%d", lowerFd) + var lower string + if dataOnly { + lower = fmt.Sprintf(":%d", lowerFd) + dataOnly = false + } else { + lower = fmt.Sprintf("%d", lowerFd) + } + newLowers = append(newLowers, lower) } - lowerv = strings.Join(lowers, ":") + lowerv = strings.Join(newLowers, ":") } // Reconstruct the Label field. diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 0f6d7402..04ecf871 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,7 +82,7 @@ const ( lowerFile = "lower" maxDepth = 500 - zstdChunkedManifest = "zstd-chunked-manifest" + tocArtifact = "toc" // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -1003,8 +1003,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } } if parent != "" { - parentBase, parentImageStore, _ := d.dir2(parent) - if parentImageStore != "" { + parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) + // If parentBase path is additional image store, select the image contained in parentBase. + // See https://github.com/containers/podman/issues/19748 + if parentImageStore != "" && !inAdditionalStore { parentBase = parentImageStore } st, err := system.Stat(filepath.Join(parentBase, "diff")) @@ -1079,12 +1081,13 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } if parent != "" { - parentDir, parentImageStore, _ := d.dir2(parent) - base := parentDir - if parentImageStore != "" { - base = parentImageStore + parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) + // If parentBase path is additional image store, select the image contained in parentBase. + // See https://github.com/containers/podman/issues/19748 + if parentImageStore != "" && !inAdditionalStore { + parentBase = parentImageStore } - st, err := system.Stat(filepath.Join(base, "diff")) + st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err } @@ -1447,7 +1450,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" if len(optsList) == 0 { - optsList = strings.Split(d.options.mountOptions, ",") + if d.options.mountOptions != "" { + optsList = strings.Split(d.options.mountOptions, ",") + } } else { // If metacopy=on is present in d.options.mountOptions it must be present in the mount // options otherwise the kernel refuses to follow the metacopy xattr. 
@@ -1524,15 +1529,8 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO defer cleanupFunc() } - composefsLayers := filepath.Join(workDirBase, "composefs-layers") - if err := os.MkdirAll(composefsLayers, 0o700); err != nil { - return "", err - } - skipIDMappingLayers := make(map[string]string) - composeFsLayers := []string{} - composefsMounts := []string{} defer func() { for _, m := range composefsMounts { @@ -1540,7 +1538,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } }() - maybeAddComposefsMount := func(lowerID string, i int) (string, error) { + composeFsLayers := []string{} + composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers") + maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) _, err = os.Stat(composefsBlob) if err != nil { @@ -1551,7 +1551,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID) - dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i)) + if readWrite && i == 0 { + return "", fmt.Errorf("cannot mount a composefs layer as writeable") + } + + dest := filepath.Join(composeFsLayersDir, fmt.Sprintf("%d", i)) if err := os.MkdirAll(dest, 0o700); err != nil { return "", err } @@ -1571,7 +1575,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO diffDir := path.Join(workDirBase, "diff") - if dest, err := maybeAddComposefsMount(id, 0); err != nil { + if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil { return "", err } else if dest != "" { diffDir = dest @@ -1623,7 +1627,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", err } lowerID := filepath.Base(filepath.Dir(linkContent)) - composefsMount, err := maybeAddComposefsMount(lowerID, i+1) + composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite) if err != nil { return "", err } @@ -1655,8 +1659,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "metacopy=on", "redirect_dir=on") } - absLowers = append(absLowers, composeFsLayers...) - if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) } @@ -1750,11 +1752,20 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = newAbsDir } + lowerDirs := strings.Join(absLowers, ":") + if len(composeFsLayers) > 0 { + composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::") + lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs + } + // absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent + // its usage. 
+ absLowers = nil //nolint:ineffassign + var opts string if readWrite { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs) } if len(optsList) > 0 { opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) @@ -1798,9 +1809,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if readWrite { diffDir := path.Join(id, "diff") workDir := path.Join(id, "work") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workDir) } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs) } if len(optsList) > 0 { opts = strings.Join(append([]string{opts}, optsList...), ",") @@ -2007,11 +2018,34 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { return os.RemoveAll(stagingDirectory) } +func (d *Driver) supportsDataOnlyLayers() (bool, error) { + feature := "dataonly-layers" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that data-only layers for overlay are supported") + return true, nil + } + logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported") + return false, errors.New(overlayCacheText) + } + supportsDataOnly, err := supportsDataOnlyLayers(d.home) + if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil { + return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2) + } + return supportsDataOnly, err +} + func (d *Driver) useComposeFs() bool { if !composeFsSupported() || unshare.IsRootless() { return false } - return true + supportsDataOnlyLayers, err := d.supportsDataOnlyLayers() + if err != nil { + logrus.Debugf("Check for data-only layers failed with: %v", err) + return false + } + return supportsDataOnlyLayers } // ApplyDiff applies the changes in the new layer using the specified function @@ -2074,11 +2108,12 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri if d.useComposeFs() { // FIXME: move this logic into the differ so we don't have to open // the file twice. 
- if err := enableVerityRecursive(stagingDirectory); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + verityDigests, err := enableVerityRecursive(stagingDirectory) + if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { logrus.Warningf("%s", err) } - toc := diffOutput.BigData[zstdChunkedManifest] - if err := generateComposeFsBlob(toc, d.getComposefsData(id)); err != nil { + toc := diffOutput.Artifacts[tocArtifact] + if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { return err } } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index e5835757..d105e73f 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -1245,8 +1245,8 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if parentLayer != nil { parent = parentLayer.ID } - var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings var ( + templateIDMappings *idtools.IDMappings templateMetadata string templateCompressedDigest digest.Digest templateCompressedSize int64 @@ -1274,11 +1274,6 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount } else { templateIDMappings = &idtools.IDMappings{} } - if parentLayer != nil { - parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) - } else { - parentMappings = &idtools.IDMappings{} - } if mountLabel != "" { selinux.ReserveLabel(mountLabel) } @@ -1353,6 +1348,12 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount IDMappings: idMappings, } + var parentMappings, oldMappings *idtools.IDMappings + if parentLayer != nil { + parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) + } else { + parentMappings = &idtools.IDMappings{} + } if moreOptions.TemplateLayer != "" { if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer) @@ -1371,10 +1372,13 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err) } } - oldMappings = parentMappings + if parentLayer != nil { + oldMappings = parentMappings + } } - if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { + if oldMappings != nil && + (!reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs())) { if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { cleanupFailureContext = "in UpdateLayerIDMap" return nil, -1, err diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 29f800b2..05d25711 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -955,14 +955,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err if options.ForceMask != nil { // if ForceMask is in place, make sure lchown is disabled. 
doChown = false - uid, gid, mode, err := GetFileOwner(dest) - if err == nil { - value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { - return err - } - } } + var rootHdr *tar.Header // Iterate through the files in the archive. loop: @@ -1007,6 +1001,9 @@ loop: if err != nil { return err } + if rel == "." { + rootHdr = hdr + } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } @@ -1080,6 +1077,14 @@ loop: return err } } + + if options.ForceMask != nil && rootHdr != nil { + value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode) + if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 56c30e26..5d4befc2 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -26,6 +26,8 @@ import ( const ( cacheKey = "chunked-manifest-cache" cacheVersion = 1 + + digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) type metadata struct { @@ -650,6 +652,9 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { iter.Skip() } } + if m.Type == TypeReg && m.Size == 0 && m.Digest == "" { + m.Digest = digestSha256Empty + } toc.Entries = append(toc.Entries, m) } break diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go new file mode 100644 index 00000000..a0892803 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -0,0 +1,230 @@ +package dump + +import ( + "bufio" + "fmt" + "io" + "strings" + "time" + "unicode" + + "github.com/containers/storage/pkg/chunked/internal" + "golang.org/x/sys/unix" +) + +const ( + ESCAPE_STANDARD = 0 + NOESCAPE_SPACE = 1 << iota + ESCAPE_EQUAL + ESCAPE_LONE_DASH +) + +func escaped(val string, escape int) string { + noescapeSpace := escape&NOESCAPE_SPACE != 0 + escapeEqual := escape&ESCAPE_EQUAL != 0 + escapeLoneDash := escape&ESCAPE_LONE_DASH != 0 + + length := len(val) + + if escapeLoneDash && val == "-" { + return fmt.Sprintf("\\x%.2x", val[0]) + } + + var result string + for i := 0; i < length; i++ { + c := val[i] + hexEscape := false + var special string + + switch c { + case '\\': + special = "\\\\" + case '\n': + special = "\\n" + case '\r': + special = "\\r" + case '\t': + special = "\\t" + case '=': + hexEscape = escapeEqual + default: + if noescapeSpace { + hexEscape = !unicode.IsPrint(rune(c)) + } else { + hexEscape = !unicode.IsGraphic(rune(c)) + } + } + + if special != "" { + result += special + } else if hexEscape { + result += fmt.Sprintf("\\x%.2x", c) + } else { + result += string(c) + } + } + return result +} + +func escapedOptional(val string, escape int) string { + if val == "" { + return "-" + } + return escaped(val, escape) +} + +func getStMode(mode uint32, typ string) (uint32, error) { + switch typ { + case internal.TypeReg, internal.TypeLink: + mode |= unix.S_IFREG + case internal.TypeChar: + mode |= unix.S_IFCHR + case internal.TypeBlock: + mode |= unix.S_IFBLK + case internal.TypeDir: + mode |= unix.S_IFDIR + case internal.TypeFifo: + mode |= unix.S_IFIFO + case internal.TypeSymlink: + 
mode |= unix.S_IFLNK + default: + return 0, fmt.Errorf("unknown type %s", typ) + } + return mode, nil +} + +func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { + path := entry.Name + if path == "" { + path = "/" + } else if path[0] != '/' { + path = "/" + path + } + + if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { + return err + } + + nlinks := links[entry.Name] + links[entry.Linkname] + 1 + link := "" + if entry.Type == internal.TypeLink { + link = "@" + } + + rdev := unix.Mkdev(uint32(entry.Devmajor), uint32(entry.Devminor)) + + entryTime := entry.ModTime + if entryTime == nil { + t := time.Unix(0, 0) + entryTime = &t + } + + mode, err := getStMode(uint32(entry.Mode), entry.Type) + if err != nil { + return err + } + + if _, err := fmt.Fprintf(out, " %d %s%o %d %d %d %d %d.%d ", entry.Size, + link, mode, + nlinks, entry.UID, entry.GID, rdev, + entryTime.Unix(), entryTime.Nanosecond()); err != nil { + return err + } + + var payload string + if entry.Linkname != "" { + payload = entry.Linkname + if entry.Type == internal.TypeLink && payload[0] != '/' { + payload = "/" + payload + } + } else { + if len(entry.Digest) > 10 { + d := strings.Replace(entry.Digest, "sha256:", "", 1) + payload = d[:2] + "/" + d[2:] + } + } + + if _, err := fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil { + return err + } + + /* inline content. */ + if _, err := fmt.Fprint(out, " -"); err != nil { + return err + } + + /* store the digest. */ + if _, err := fmt.Fprint(out, " "); err != nil { + return err + } + digest := verityDigests[payload] + if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil { + return err + } + + for k, v := range entry.Xattrs { + name := escaped(k, ESCAPE_EQUAL) + value := escaped(v, ESCAPE_EQUAL) + + if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil { + return err + } + } + if _, err := fmt.Fprint(out, "\n"); err != nil { + return err + } + return nil +} + +// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump` +func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) { + toc, ok := tocI.(*internal.TOC) + if !ok { + return nil, fmt.Errorf("invalid TOC type") + } + pipeR, pipeW := io.Pipe() + go func() { + closed := false + w := bufio.NewWriter(pipeW) + defer func() { + if !closed { + w.Flush() + pipeW.Close() + } + }() + + links := make(map[string]int) + for _, e := range toc.Entries { + if e.Linkname == "" { + continue + } + links[e.Linkname] = links[e.Linkname] + 1 + } + + if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") { + root := &internal.FileMetadata{ + Name: "/", + Type: internal.TypeDir, + Mode: 0o755, + } + + if err := dumpNode(w, links, verityDigests, root); err != nil { + pipeW.CloseWithError(err) + closed = true + return + } + } + + for _, e := range toc.Entries { + if e.Type == internal.TypeChunk { + continue + } + if err := dumpNode(w, links, verityDigests, &e); err != nil { + pipeW.CloseWithError(err) + closed = true + return + } + } + }() + return pipeR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 088c9278..8493a2c1 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -45,6 +45,7 @@ const ( 
bigDataKey = "zstd-chunked-manifest" chunkedData = "zstd-chunked-data" chunkedLayerDataKey = "zstd-chunked-layer-data" + tocKey = "toc" fileTypeZstdChunked = iota fileTypeEstargz @@ -1470,10 +1471,7 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta continue } if mergedEntries[i].Digest == "" { - if mergedEntries[i].Size != 0 { - return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name) - } - continue + return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name) } digest, err := digest.Parse(mergedEntries[i].Digest) if err != nil { @@ -1542,6 +1540,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err != nil { return graphdriver.DriverWithDifferOutput{}, err } + + // Generate the manifest + toc, err := unmarshalToc(c.manifest) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + output := graphdriver.DriverWithDifferOutput{ Differ: c, TarSplit: c.tarSplit, @@ -1549,6 +1554,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff bigDataKey: c.manifest, chunkedLayerDataKey: lcdBigData, }, + Artifacts: map[string]interface{}{ + tocKey: toc, + }, TOCDigest: c.contentDigest, } @@ -1563,12 +1571,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // List of OSTree repositories to use for deduplication ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":") - // Generate the manifest - toc, err := unmarshalToc(c.manifest) - if err != nil { - return output, err - } - whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) var missingParts []missingPart diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index b8bfa589..40d8fd2b 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -114,6 +114,16 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File // AttachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + return attachLoopDevice(sparseName, false) +} + +// AttachLoopDeviceRO attaches the given sparse file opened read-only to +// the next available loopback device. It returns an opened *os.File. +func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) { + return attachLoopDevice(sparseName, true) +} + +func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start looping for a // loopback from index 0. 
@@ -122,8 +132,14 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { logrus.Debugf("Error retrieving the next available loopback: %s", err) } + var sparseFile *os.File + // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644) + if readonly { + sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644) + } else { + sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644) + } if err != nil { logrus.Errorf("Opening sparse file: %v", err) return nil, ErrAttachLoopbackDevice diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go index 5917fa25..12243707 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -28,7 +28,7 @@ func EnsureRemoveAll(dir string) error { // track retries exitOnErr := make(map[string]int) - maxRetry := 100 + maxRetry := 1000 // Attempt a simple remove all first, this avoids the more expensive // RecursiveUnmount call if not needed. @@ -38,7 +38,7 @@ func EnsureRemoveAll(dir string) error { // Attempt to unmount anything beneath this dir first if err := mount.RecursiveUnmount(dir); err != nil { - logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err) + logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) } for { @@ -94,6 +94,6 @@ func EnsureRemoveAll(dir string) error { return err } exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) + time.Sleep(10 * time.Millisecond) } } diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go index 86ac12ec..480e2fcb 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go @@ -25,6 +25,11 @@ func GetRootlessUID() int { return os.Getuid() } +// GetRootlessGID returns the GID of the user in the parent userNS +func GetRootlessGID() int { + return os.Getgid() +} + // RootlessEnv returns the environment settings for the rootless containers func RootlessEnv() []string { return append(os.Environ(), UsernsEnvName+"=") diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index e169633d..a8dc1ba0 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -441,6 +441,16 @@ func GetRootlessUID() int { return os.Getuid() } +// GetRootlessGID returns the GID of the user in the parent userNS +func GetRootlessGID() int { + gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") + if gidEnv != "" { + u, _ := strconv.Atoi(gidEnv) + return u + } + return os.Getgid() +} + // RootlessEnv returns the environment settings for the rootless containers func RootlessEnv() []string { return append(os.Environ(), UsernsEnvName+"=done") diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go index 66dd5459..83de680c 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go @@ -25,6 +25,11 @@ func GetRootlessUID() int { return os.Getuid() } +// GetRootlessGID returns the GID of the user in the parent userNS +func GetRootlessGID() int { + return os.Getgid() +} + // RootlessEnv returns 
the environment settings for the rootless containers func RootlessEnv() []string { return append(os.Environ(), UsernsEnvName+"=") diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 7082c0b7..cb4525f2 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -27,9 +27,8 @@ runroot = "/run/containers/storage" # restorecon -R -v /NEWSTORAGEPATH graphroot = "/var/lib/containers/storage" -# Optional value for image storage location -# If set, it must be different than graphroot. - +# Optional alternate location of image store if a location separate from the +# container store is required. If set, it must be different than graphroot. # imagestore = "" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index dc9d09b8..6753b296 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -2666,34 +2666,23 @@ func (s *store) DeleteContainer(id string) error { } var wg multierror.Group - wg.Go(func() error { return s.containerStore.Delete(id) }) middleDir := s.graphDriverName + "-containers" wg.Go(func() error { gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - // attempt a simple rm -rf first - if err := os.RemoveAll(gcpath); err == nil { - return nil - } - // and if it fails get to the more complicated cleanup return system.EnsureRemoveAll(gcpath) }) wg.Go(func() error { rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) - // attempt a simple rm -rf first - if err := os.RemoveAll(rcpath); err == nil { - return nil - } - // and if it fails get to the more complicated cleanup return system.EnsureRemoveAll(rcpath) }) if multierr := wg.Wait(); multierr != nil { return multierr.ErrorOrNil() } - return nil + return s.containerStore.Delete(id) }) } @@ -3418,16 +3407,16 @@ func (s *store) Shutdown(force bool) ([]string, error) { err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer) } if err == nil { - err = s.graphDriver.Cleanup() // We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(), // so that we reload after a .Shutdown() the same way other processes would. // Shutdown() is basically an error path, so reliability is more important than performance. if _, err2 := s.graphLock.RecordWrite(); err2 != nil { - if err == nil { - err = err2 - } else { - err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err) - } + err = fmt.Errorf("graphLock.RecordWrite failed: %w", err2) + } + // Do the Cleanup() only after we are sure that the change was recorded with RecordWrite(), so that + // the next user picks it. + if err == nil { + err = s.graphDriver.Cleanup() } } return mounted, err diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index ab041a07..5ae667a4 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -220,9 +220,8 @@ type StoreOptions struct { // GraphRoot is the filesystem path under which we will store the // contents of layers, images, and containers. GraphRoot string `json:"root,omitempty"` - // Image Store is the location of image store which is seperated from the - // container store. Usually this is not recommended unless users wants - // seperate store for image and containers. 
+ // Image Store is the alternate location of image store if a location + // separate from the container store is required. ImageStore string `json:"imagestore,omitempty"` // RootlessStoragePath is the storage path for rootless users // default $HOME/.local/share/containers/storage diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 7635b9f6..d98bc1bf 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -3011,8 +3011,6 @@ definitions: Name: "journald" - Type: "Log" Name: "json-file" - - Type: "Log" - Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" @@ -5334,7 +5332,7 @@ definitions: type: "array" items: type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] RegistryServiceConfig: diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 2375eb1e..b7d80542 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -66,8 +66,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // // If there's a JSON parsing error, read the real error message // off the body and send it to the client. - _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } return } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index a4001c3b..2194c47d 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -11,17 +11,11 @@ import ( "os/exec" "path/filepath" "strconv" - "sync" "syscall" "github.com/opencontainers/runc/libcontainer/user" ) -var ( - entOnce sync.Once - getentCmd string -) - func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { path, err := filepath.Abs(path) if err != nil { @@ -162,10 +156,10 @@ func getentGroup(name string) (user.Group, error) { } func callGetent(database, key string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("unable to find getent command") + getentCmd, err := resolveBinary("getent") + // if no `getent` command within the execution environment, can't do anything else + if err != nil { + return nil, fmt.Errorf("unable to find getent command: %w", err) } command := exec.Command(getentCmd, database, key) // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index de00b95e..d82ae930 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" 
"io" + "runtime/debug" + "sync/atomic" // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. _ "crypto/sha256" _ "crypto/sha512" + + "github.com/sirupsen/logrus" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -16,10 +20,15 @@ import ( type ReadCloserWrapper struct { io.Reader closer func() error + closed atomic.Bool } // Close calls back the passed closer function func (r *ReadCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("ReadCloserWrapper") + return nil + } return r.closer() } @@ -87,6 +96,7 @@ type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter + closed atomic.Bool } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the @@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) { // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. func (p *cancelReadCloser) Close() error { + if !p.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("cancelReadCloser") + return nil + } p.closeWithError(io.EOF) return nil } + +func subsequentCloseWarn(name string) { + logrus.Error("subsequent attempt to close " + name) + if logrus.GetLevel() >= logrus.DebugLevel { + logrus.Errorf("stack trace: %s", string(debug.Stack())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index 61c67949..1f50602f 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,6 +1,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "io" +import ( + "io" + "sync/atomic" +) // NopWriter represents a type which write operation is nop. 
type NopWriter struct{} @@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error + closed atomic.Bool } func (r *writeCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("WriteCloserWrapper") + return nil + } return r.closer() } diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 296c96a6..4049d780 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -177,27 +177,27 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { // Strip [] from IPV6 addresses rawIP, _, err := net.SplitHostPort(ip + ":") if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + return nil, fmt.Errorf("invalid IP address %v: %w", ip, err) } ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) + return nil, fmt.Errorf("invalid IP address: %s", ip) } if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) + return nil, fmt.Errorf("no port specified: %s", rawPort) } startPort, endPort, err := ParsePortRange(containerPort) if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + return nil, fmt.Errorf("invalid containerPort: %s", containerPort) } var startHostPort, endHostPort uint64 = 0, 0 if len(hostPort) > 0 { startHostPort, endHostPort, err = ParsePortRange(hostPort) if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + return nil, fmt.Errorf("invalid hostPort: %s", hostPort) } } @@ -206,12 +206,12 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { // In this case, use the host port range as the dynamic // host port range to allocate into. if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) } } if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) + return nil, fmt.Errorf("invalid proto: %s", proto) } ports := []PortMapping{} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go index 892adf8c..e4b53e8a 100644 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -6,34 +6,10 @@ import ( "strings" ) -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - // ParsePortRange parses and validates the specified string as a port-range (8000-9000) func ParsePortRange(ports string) (uint64, uint64, error) { if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") + return 0, 0, fmt.Errorf("empty string specified for ports") } if !strings.Contains(ports, "-") { start, err := strconv.ParseUint(ports, 10, 16) @@ -51,7 +27,7 @@ func ParsePortRange(ports string) (uint64, uint64, error) { return 0, 0, err } if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports) } return start, end, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index 2e9e9006..b0eae239 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -3,16 +3,23 @@ package sockets import ( "errors" + "net" "net/http" + "time" ) +const defaultTimeout = 10 * time.Second + // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. var ErrProtocolNotAvailable = errors.New("protocol not available") -// ConfigureTransport configures the specified Transport according to the -// specified proto and addr. -// If the proto is unix (using a unix socket to communicate) or npipe the -// compression is disabled. +// ConfigureTransport configures the specified [http.Transport] according to the specified proto +// and addr. +// +// If the proto is unix (using a unix socket to communicate) or npipe the compression is disabled. +// For other protos, compression is enabled. If you want to manually enable/disable compression, +// make sure you do it _after_ any subsequent calls to ConfigureTransport is made against the same +// [http.Transport]. func ConfigureTransport(tr *http.Transport, proto, addr string) error { switch proto { case "unix": @@ -21,6 +28,10 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment + tr.DisableCompression = false + tr.DialContext = (&net.Dialer{ + Timeout: defaultTimeout, + }).DialContext } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 10d76342..78a34a98 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -1,4 +1,4 @@ -// +build !windows +//go:build !windows package sockets @@ -11,14 +11,11 @@ import ( "time" ) -const ( - defaultTimeout = 10 * time.Second - maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) -) +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { - return fmt.Errorf("Unix socket path %q is too long", addr) + return fmt.Errorf("unix socket path %q is too long", addr) } // No need for compression in local communications. 
tr.DisableCompression = true diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index e7591e6e..b9233521 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,9 +1,9 @@ -// +build !windows +//go:build !windows /* Package sockets is a simple unix domain socket wrapper. -Usage +# Usage For example: @@ -42,7 +42,7 @@ For example: if _, err := conn.Read(buf); err != nil { panic(err) } else if string(buf) != echoStr { - panic(fmt.Errorf("Msg may lost")) + panic(fmt.Errorf("msg may lost")) } } */ @@ -103,7 +103,7 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error // We don't use "defer" here, to reset the umask to its original value as soon // as possible. Ideally we'd be able to detect if WithChmod() was passed as // an option, and skip changing umask if default permissions are used. - origUmask := syscall.Umask(0777) + origUmask := syscall.Umask(0o777) l, err := net.Listen("unix", path) syscall.Umask(origUmask) if err != nil { @@ -122,5 +122,5 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error // NewUnixSocket creates a unix socket with the specified path and group. func NewUnixSocket(path string, gid int) (net.Listener, error) { - return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660)) } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go similarity index 95% rename from vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go rename to vendor/github.com/docker/go-connections/tlsconfig/certpool.go index 1ca0965e..f84c624b 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go @@ -1,5 +1,3 @@ -// +build go1.7 - package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c33..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 99296837..606c98a3 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -1,6 +1,7 @@ // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. // // As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// // A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. // A Config may be reused; the tls package will also not modify it. 
package tlsconfig @@ -9,11 +10,9 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" - "io/ioutil" "os" - - "github.com/pkg/errors" ) // Options represents the information needed to create client and server TLS configurations. @@ -36,7 +35,12 @@ type Options struct { ExclusiveRootPools bool MinVersion uint16 // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted + // if the key is encrypted. + // + // Deprecated: Use of encrypted TLS private keys has been deprecated, and + // will be removed in a future release. Golang has deprecated support for + // legacy PEM encryption (as specified in RFC 1423), as it is insecure by + // design (see https://go-review.googlesource.com/c/go/+/264159). Passphrase string } @@ -99,7 +103,7 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pemData, err := ioutil.ReadFile(caFile) + pemData, err := os.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } @@ -109,6 +113,15 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return certPool, nil } +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} + // isValidMinVersion checks that the input value is a valid tls minimum version func isValidMinVersion(version uint16) bool { _, ok := allTLSVersions[version] @@ -120,10 +133,10 @@ func isValidMinVersion(version uint16) bool { func adjustMinVersion(options Options, config *tls.Config) error { if options.MinVersion > 0 { if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + return fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion) } if options.MinVersion < config.MinVersion { - return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + return fmt.Errorf("requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) } config.MinVersion = options.MinVersion } @@ -132,9 +145,14 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key +// password when trying to decrypt a TLS private key. +// +// Deprecated: Use of encrypted TLS private keys has been deprecated, and +// will be removed in a future release. Golang has deprecated support for +// legacy PEM encryption (as specified in RFC 1423), as it is insecure by +// design (see https://go-review.googlesource.com/c/go/+/264159). func IsErrEncryptedKey(err error) bool { - return errors.Cause(err) == x509.IncorrectPasswordError + return errors.Is(err, x509.IncorrectPasswordError) } // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. 
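For context on how the tlsconfig changes above surface to callers, here is a minimal sketch (not part of this patch) of building a client configuration with the Options fields and the MinVersion validation shown in these hunks; the certificate paths are placeholders, and tlsconfig.Client is the public constructor touched later in this diff:

    package main

    import (
        "crypto/tls"
        "log"

        "github.com/docker/go-connections/tlsconfig"
    )

    func main() {
        // Placeholder paths; swap in real PEM files.
        cfg, err := tlsconfig.Client(tlsconfig.Options{
            CAFile:     "ca.pem",
            CertFile:   "cert.pem",
            KeyFile:    "key.pem",
            MinVersion: tls.VersionTLS12, // checked against allTLSVersions above
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg // e.g. assign to http.Transport.TLSClientConfig
    }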
@@ -151,7 +169,7 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) } keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) } @@ -167,26 +185,24 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, nil } - errMessage := "Could not load X509 key pair" - - cert, err := ioutil.ReadFile(options.CertFile) + cert, err := os.ReadFile(options.CertFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } - prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + prKeyBytes, err := os.ReadFile(options.KeyFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } return []tls.Certificate{tlsCert}, nil @@ -206,7 +222,7 @@ func Client(options Options) (*tls.Config, error) { tlsCerts, err := getCert(options) if err != nil { - return nil, err + return nil, fmt.Errorf("could not load X509 key pair: %w", err) } tlsConfig.Certificates = tlsCerts @@ -224,9 +240,9 @@ func Server(options Options) (*tls.Config, error) { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("error reading X509 key pair - make sure the key is not encrypted (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go index 6b4c6a7c..a82f9fa5 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -1,7 +1,4 @@ -// +build go1.5 - // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
-// package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go deleted file mode 100644 index d8215f8e..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build go1.13 - -package tlsconfig - -import ( - "crypto/tls" -) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, - tls.VersionTLS13: {}, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go deleted file mode 100644 index a5ba7f4a..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.13 - -package tlsconfig - -import ( - "crypto/tls" -) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md deleted file mode 100644 index 3305db0f..00000000 --- a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . - diff --git a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md index 7820c2f4..ce2a54eb 100644 --- a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md @@ -1,6 +1,76 @@ +# v4.0.1 + +## Fixed + + - An attacker could send a JWE containing compressed data that used large + amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. + Those functions now return an error if the decompressed data would exceed + 250kB or 10x the compressed size (whichever is larger). Thanks to + Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) + for reporting. 
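The cap described in this entry corresponds to the inflate change to encoding.go further down in this patch; as a standalone sketch of the same idea (the helper name boundedInflate is illustrative, not the library's):

    package main

    import (
        "bytes"
        "compress/flate"
        "fmt"
        "io"
    )

    // boundedInflate decompresses input but refuses to produce more than
    // max(250000, 10*len(input)) bytes: it copies at most limit+1 bytes
    // and errors out when the limit is actually reached.
    func boundedInflate(input []byte) ([]byte, error) {
        maxSize := 10 * int64(len(input))
        if maxSize < 250000 {
            maxSize = 250000
        }
        out := new(bytes.Buffer)
        r := flate.NewReader(bytes.NewReader(input))
        defer r.Close()
        n, err := io.CopyN(out, r, maxSize+1)
        if err != nil && err != io.EOF {
            return nil, err
        }
        if n == maxSize+1 {
            return nil, fmt.Errorf("decompressed data would exceed %d bytes", maxSize)
        }
        return out.Bytes(), nil
    }

    func main() {
        // Round-trip a small payload to exercise the helper.
        var b bytes.Buffer
        w, _ := flate.NewWriter(&b, flate.DefaultCompression)
        w.Write([]byte("hello"))
        w.Close()
        out, err := boundedInflate(b.Bytes())
        fmt.Println(string(out), err)
    }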
+ +# v4.0.0 + +This release makes some breaking changes in order to more thoroughly +address the vulnerabilities discussed in [Three New Attacks Against JSON Web +Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot +token". + +## Changed + + - Limit JWT encryption types (exclude password or public key types) (#78) + - Enforce minimum length for HMAC keys (#85) + - jwt: match any audience in a list, rather than requiring all audiences (#81) + - jwt: accept only Compact Serialization (#75) + - jws: Add expected algorithms for signatures (#74) + - Require specifying expected algorithms for ParseEncrypted, + ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, + jwt.ParseSignedAndEncrypted (#69, #74) + - Usually there is a small, known set of appropriate algorithms for a program + to use and it's a mistake to allow unexpected algorithms. For instance the + "billion hash attack" relies in part on programs accepting the PBES2 + encryption algorithm and doing the necessary work even if they weren't + specifically configured to allow PBES2. + - Revert "Strip padding off base64 strings" (#82) + - The specs require base64url encoding without padding. + - Minimum supported Go version is now 1.21 + +## Added + + - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. + - These allow parsing a specific serialization, as opposed to ParseSigned and + ParseEncrypted, which try to automatically detect which serialization was + provided. It's common to require a specific serialization for a specific + protocol - for instance JWT requires Compact serialization. + +[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v3.0.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. + +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + # v3.0.1 -Fixed: +## Fixed + - Security issue: an attacker specifying a large "p2c" value can cause JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large amounts of CPU, causing a DoS. 
Thanks to Matt Schwager (@mschwager) for the diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md index b90c7e5c..282cd9e1 100644 --- a/vendor/github.com/go-jose/go-jose/v3/README.md +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -1,10 +1,17 @@ # Go JOSE -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) -[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) +### Versions + +[Version 4](https://github.com/go-jose/go-jose) +([branch](https://github.com/go-jose/go-jose/), +[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: + + import "github.com/go-jose/go-jose/v4" + +The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which +are deprecated. + +### Summary Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, @@ -21,13 +28,13 @@ US maintained blocked list. ## Overview The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. Tables of supported algorithms are shown below. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) +([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) for dealing with JOSE messages in a shell. **Note**: We use a forked version of the `encoding/json` package from the Go @@ -36,31 +43,10 @@ of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/curren This is to avoid differences in interpretation of messages between go-jose and libraries in other languages. -### Versions - -[Version 2](https://gopkg.in/go-jose/go-jose.v2) -([branch](https://github.com/go-jose/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: - - import "gopkg.in/go-jose/go-jose.v2" - -[Version 3](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/master), -[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): - - import "github.com/go-jose/go-jose/v3" - -All new feature development takes place on the `master` branch, which we are -preparing to release as version 3 soon. Version 2 will continue to receive -critical bug and security fixes. 
Note that starting with version 3 we are -using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. - -Version 1 (on the `v1` branch) is frozen and not supported anymore. - ### Supported algorithms See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) standard where possible. The Godoc reference has a list of constants. Key encryption | Algorithm identifier(s) @@ -103,20 +89,20 @@ allows attaching a key id. Algorithm(s) | Corresponding types :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey) + RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) AES, HMAC | []byte 1. Only available in version 2 or later of the package ## Examples -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) Examples can be found in the Godoc reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v3/SECURITY.md b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md new file mode 100644 index 00000000..2f18a75a --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy +This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. + +## Supported Versions +| Version | Supported | +| ------- | ----------| +| >= v3 | ✓ | +| v2 | ✗ | +| v1 | ✗ | + +## Reporting a vulnerability + +Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. 
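To make the algorithm tables above concrete, a small RS256 sign/verify round trip against the v3 API documented in this README (a throwaway in-memory key; error handling reduced to panics for brevity):

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        jose "github.com/go-jose/go-jose/v3"
    )

    func main() {
        // Throwaway key for illustration only.
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
        if err != nil {
            panic(err)
        }
        obj, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
        if err != nil {
            panic(err)
        }
        serialized, err := obj.CompactSerialize()
        if err != nil {
            panic(err)
        }
        parsed, err := jose.ParseSigned(serialized)
        if err != nil {
            panic(err)
        }
        payload, err := parsed.Verify(&key.PublicKey)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(payload))
    }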
diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go index 78abc326..d4d4961b 100644 --- a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go @@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm switch alg { case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) case PS256, PS384, PS512: out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go index 6901137e..8870e890 100644 --- a/vendor/github.com/go-jose/go-jose/v3/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -21,7 +21,6 @@ import ( "crypto/rsa" "errors" "fmt" - "reflect" "github.com/go-jose/go-jose/v3/json" ) @@ -76,14 +75,24 @@ type recipientKeyInfo struct { type EncrypterOptions struct { Compression CompressionAlgorithm - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // Optional map of name/value pairs to be inserted into the protected + // header of a JWS object. Some specifications which make use of + // JWS require additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated EncrypterOptions. +// +// The v parameter will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { if eo.ExtraHeaders == nil { eo.ExtraHeaders = map[HeaderKey]interface{}{} @@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { // default of 100000 will be used for the count and a 128-bit random salt will // be generated. type Recipient struct { - Algorithm KeyAlgorithm + Algorithm KeyAlgorithm + // Key must have one of these types: + // - ed25519.PublicKey + // - *ecdsa.PublicKey + // - *rsa.PublicKey + // - *JSONWebKey + // - JSONWebKey + // - []byte (a symmetric key) + // - Any type that satisfies the OpaqueKeyEncrypter interface + // + // The type of Key must match the value of Algorithm. 
Key interface{} KeyID string PBES2Count int @@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) switch rcpt.Algorithm { case DIRECT: // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + keyBytes, ok := rawKey.([]byte) + if !ok { return nil, ErrUnsupportedKeyType } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + if encrypter.cipher.keySize() != len(keyBytes) { return nil, ErrInvalidKeySize } encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), + key: keyBytes, } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) return encrypter, nil case ECDH_ES: // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { return nil, ErrUnsupportedKeyType } encrypter.keyGenerator = ecKeyGenerator{ size: encrypter.cipher.keySize(), algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), + publicKey: keyDSA, } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey recipient, err := makeJWERecipient(alg, encryptionKey.Key) recipient.keyID = encryptionKey.KeyID return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) } return recipientKeyInfo{}, ErrUnsupportedKeyType } @@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { return newDecrypter(decryptionKey.Key) case *JSONWebKey: return newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType } // Implementation of encrypt method producing a JWE object. @@ -403,9 +422,27 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { } } -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient // decryption use DecryptMulti instead. +// +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. 
+// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { headers := obj.mergedHeaders(nil) @@ -462,15 +499,24 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } - return plaintext, err + return plaintext, nil } // DecryptMulti decrypts and validates the object and returns the plaintexts, // with support for multiple recipients. It returns the index of the recipient // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. +// +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >3x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) @@ -532,7 +578,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { - plaintext, _ = decompress(comp, plaintext) + plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } sanitized, err := headers.sanitized() diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go index 71ec1c41..0ad40ca0 100644 --- a/vendor/github.com/go-jose/go-jose/v3/doc.go +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -15,13 +15,11 @@ */ /* - Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. It implements encryption and signing based on the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web Token support available in a sub-package. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. - */ package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go index 968a4249..9f07cfdc 100644 --- a/vendor/github.com/go-jose/go-jose/v3/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -21,6 +21,7 @@ import ( "compress/flate" "encoding/base64" "encoding/binary" + "fmt" "io" "math/big" "strings" @@ -85,7 +86,7 @@ func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { } } -// Compress with DEFLATE +// deflate compresses the input. func deflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) @@ -97,15 +98,27 @@ func deflate(input []byte) ([]byte, error) { return output.Bytes(), err } -// Decompress with DEFLATE +// inflate decompresses the input. +// +// Errors if the decompressed data would be >250kB or >10x the size of the +// compressed data, whichever is larger. 
 func inflate(input []byte) ([]byte, error) {
 	output := new(bytes.Buffer)
 	reader := flate.NewReader(bytes.NewBuffer(input))
 
-	_, err := io.Copy(output, reader)
-	if err != nil {
+	maxCompressedSize := 10 * int64(len(input))
+	if maxCompressedSize < 250000 {
+		maxCompressedSize = 250000
+	}
+
+	limit := maxCompressedSize + 1
+	n, err := io.CopyN(output, reader, limit)
+	if err != nil && err != io.EOF {
 		return nil, err
 	}
+	if n == limit {
+		return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize)
+	}
 
 	err = reader.Close()
 	return output.Bytes(), err
@@ -189,3 +202,36 @@ func base64URLDecode(value string) ([]byte, error) {
 	value = strings.TrimRight(value, "=")
 	return base64.RawURLEncoding.DecodeString(value)
 }
+
+func base64EncodeLen(sl []byte) int {
+	return base64.RawURLEncoding.EncodedLen(len(sl))
+}
+
+func base64JoinWithDots(inputs ...[]byte) string {
+	if len(inputs) == 0 {
+		return ""
+	}
+
+	// Count of dots.
+	totalCount := len(inputs) - 1
+
+	for _, input := range inputs {
+		totalCount += base64EncodeLen(input)
+	}
+
+	out := make([]byte, totalCount)
+	startEncode := 0
+	for i, input := range inputs {
+		base64.RawURLEncoding.Encode(out[startEncode:], input)
+
+		if i == len(inputs)-1 {
+			continue
+		}
+
+		startEncode += base64EncodeLen(input)
+		out[startEncode] = '.'
+		startEncode++
+	}
+
+	return string(out)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go
index 4dbc4146..50634dd8 100644
--- a/vendor/github.com/go-jose/go-jose/v3/json/decode.go
+++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go
@@ -75,14 +75,13 @@ import (
 //
 // The JSON null value unmarshals into an interface, map, pointer, or slice
 // by setting that Go value to nil. Because null is often used in JSON to mean
-// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
 // on the value and produces no error.
 //
 // When unmarshaling quoted strings, invalid UTF-8 or
 // invalid UTF-16 surrogate pairs are not treated as an error.
 // Instead, they are replaced by the Unicode replacement
 // character U+FFFD.
-//
 func Unmarshal(data []byte, v interface{}) error {
 	// Check for well-formedness.
 	// Avoids filling out half a data structure
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go
index ea0a1361..98de68ce 100644
--- a/vendor/github.com/go-jose/go-jose/v3/json/encode.go
+++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go
@@ -58,6 +58,7 @@ import (
 // becomes a member of the object unless
 //   - the field's tag is "-", or
 //   - the field is empty and its tag specifies the "omitempty" option.
+//
 // The empty values are false, 0, any
 // nil pointer or interface value, and any array, slice, map, or string of
 // length zero. The object's default key string is the struct field name
@@ -65,28 +66,28 @@ import (
 // the struct field's tag value is the key name, followed by an optional comma
 // and options. Examples:
 //
-//   // Field is ignored by this package.
-//   Field int `json:"-"`
+//	// Field is ignored by this package.
+//	Field int `json:"-"`
 //
-//   // Field appears in JSON as key "myName".
-//   Field int `json:"myName"`
+//	// Field appears in JSON as key "myName".
+//	Field int `json:"myName"`
 //
-//   // Field appears in JSON as key "myName" and
-//   // the field is omitted from the object if its value is empty,
-//   // as defined above.
-// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, @@ -133,7 +134,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. -// func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) diff --git a/vendor/github.com/go-jose/go-jose/v3/json/stream.go b/vendor/github.com/go-jose/go-jose/v3/json/stream.go index 9b2b926b..f03b171e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/stream.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/stream.go @@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token interface{} const ( diff --git a/vendor/github.com/go-jose/go-jose/v3/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go index bce30450..4267ac75 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwe.go @@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) { serializedProtected := mustSerializeJSON(obj.protected) - return fmt.Sprintf( - "%s.%s.%s.%s.%s", - base64.RawURLEncoding.EncodeToString(serializedProtected), - base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), - base64.RawURLEncoding.EncodeToString(obj.iv), - base64.RawURLEncoding.EncodeToString(obj.ciphertext), - base64.RawURLEncoding.EncodeToString(obj.tag)), nil + return base64JoinWithDots( + serializedProtected, + obj.recipients[0].encryptedKey, + obj.iv, + obj.ciphertext, + obj.tag, + ), nil } // FullSerialize serializes an object using the full JSON serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/jwk.go b/vendor/github.com/go-jose/go-jose/v3/jwk.go index 78ff5aca..e4021959 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwk.go @@ -67,9 +67,21 @@ type rawJSONWebKey struct { X5tSHA256 string `json:"x5t#S256,omitempty"` } -// JSONWebKey represents a public or private key in JWK format. +// JSONWebKey represents a public or private key in JWK format. It can be +// marshaled into JSON and unmarshaled from JSON. type JSONWebKey struct { - // Cryptographic key, can be a symmetric or asymmetric key. + // Key is the Go in-memory representation of this key. 
It must have one + // of these types: + // - ed25519.PublicKey + // - ed25519.PrivateKey + // - *ecdsa.PublicKey + // - *ecdsa.PrivateKey + // - *rsa.PublicKey + // - *rsa.PrivateKey + // - []byte (a symmetric key) + // + // When marshaling this JSONWebKey into JSON, the "kty" header parameter + // will be automatically set based on the type of this field. Key interface{} // Key identifier, parsed from `kid` header. KeyID string @@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { input, err = rsaThumbprintInput(key.N, key.E) case ed25519.PrivateKey: input, err = edThumbprintInput(ed25519.PublicKey(key[32:])) + case OpaqueSigner: + return key.Public().Thumbprint(hash) default: return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } diff --git a/vendor/github.com/go-jose/go-jose/v3/jws.go b/vendor/github.com/go-jose/go-jose/v3/jws.go index 865f16ad..e37007db 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jws.go +++ b/vendor/github.com/go-jose/go-jose/v3/jws.go @@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) { return "", ErrNotSupported } - serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected)) - payload := "" - signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature) + serializedProtected := mustSerializeJSON(obj.Signatures[0].protected) + var payload []byte if !detached { - payload = base64.RawURLEncoding.EncodeToString(obj.payload) + payload = obj.payload } - return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil + return base64JoinWithDots( + serializedProtected, + payload, + obj.Signatures[0].Signature, + ), nil } // CompactSerialize serializes an object using the compact serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/opaque.go b/vendor/github.com/go-jose/go-jose/v3/opaque.go index fc3e8d2e..68db085e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v3/opaque.go @@ -121,7 +121,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipie return oke.encrypter.encryptKey(cek, alg) } -//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. +// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. type OpaqueKeyDecrypter interface { DecryptKey(encryptedKey []byte, header Header) ([]byte, error) } diff --git a/vendor/github.com/go-jose/go-jose/v3/shared.go b/vendor/github.com/go-jose/go-jose/v3/shared.go index fc2505e0..489a04e3 100644 --- a/vendor/github.com/go-jose/go-jose/v3/shared.go +++ b/vendor/github.com/go-jose/go-jose/v3/shared.go @@ -183,8 +183,13 @@ type Header struct { // Unverified certificate chain parsed from x5c header. certificates []*x509.Certificate - // Any headers not recognised above get unmarshalled - // from JSON in a generic manner and placed in this map. + // At parse time, each header parameter with a name other than "kid", + // "jwk", "alg", "nonce", or "x5c" will have its value passed to + // [json.Unmarshal] to unmarshal it into an interface value. + // The resulting value will be stored in this map, with the header + // parameter name as the key. 
+ // + // [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal ExtraHeaders map[HeaderKey]interface{} } diff --git a/vendor/github.com/go-jose/go-jose/v3/signing.go b/vendor/github.com/go-jose/go-jose/v3/signing.go index 81d55f58..52f3d856 100644 --- a/vendor/github.com/go-jose/go-jose/v3/signing.go +++ b/vendor/github.com/go-jose/go-jose/v3/signing.go @@ -40,6 +40,15 @@ type Signer interface { } // SigningKey represents an algorithm/key used to sign a message. +// +// Key must have one of these types: +// - ed25519.PrivateKey +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that satisfies the OpaqueSigner interface type SigningKey struct { Algorithm SignatureAlgorithm Key interface{} @@ -52,12 +61,22 @@ type SignerOptions struct { // Optional map of additional keys to be inserted into the protected header // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated SignerOptions. +// +// The v argument will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions { if so.ExtraHeaders == nil { so.ExtraHeaders = map[HeaderKey]interface{}{} @@ -173,11 +192,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) { return newVerifier(verificationKey.Key) case *JSONWebKey: return newVerifier(verificationKey.Key) + case OpaqueVerifier: + return &opaqueVerifier{verifier: verificationKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if ov, ok := verificationKey.(OpaqueVerifier); ok { - return &opaqueVerifier{verifier: ov}, nil - } - return nil, ErrUnsupportedKeyType } func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error { @@ -204,11 +223,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient return newJWKSigner(alg, signingKey) case *JSONWebKey: return newJWKSigner(alg, *signingKey) + case OpaqueSigner: + return newOpaqueSigner(alg, signingKey) + default: + return recipientSigInfo{}, ErrUnsupportedKeyType } - if signer, ok := signingKey.(OpaqueSigner); ok { - return newOpaqueSigner(alg, signer) - } - return recipientSigInfo{}, ErrUnsupportedKeyType } func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) { @@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions { } // Verify validates the signature on the object and returns the payload. -// This function does not support multi-signature, if you desire multi-sig +// This function does not support multi-signature. If you desire multi-signature // verification use VerifyMulti instead. // // Be careful when verifying signatures based on embedded JWKs inside the // payload header. You cannot assume that the key received in a payload is // trusted. 
+// +// The verificationKey argument must have one of these types: +// - ed25519.PublicKey +// - *ecdsa.PublicKey +// - *rsa.PublicKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that implements the OpaqueVerifier interface. func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) { err := obj.DetachedVerify(obj.payload, verificationKey) if err != nil { @@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // most cases, you will probably want to use Verify instead. DetachedVerify // is only useful if you have a payload and signature that are separated from // each other. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { key := tryJWKS(verificationKey, obj.headers()...) verifier, err := newVerifier(key) @@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter // returns the index of the signature that was verified, along with the signature // object and the payload. We return the signature and index to guarantee that // callers are getting the verified value. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) { idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey) if err != nil { @@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // DetachedVerifyMulti is only useful if you have a payload and signature that are // separated from each other, and the signature can have multiple signers at the // same time. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { key := tryJWKS(verificationKey, obj.headers()...) 
verifier, err := newVerifier(key) diff --git a/vendor/github.com/go-jose/go-jose/v3/symmetric.go b/vendor/github.com/go-jose/go-jose/v3/symmetric.go index 1ffd2708..10d8e19f 100644 --- a/vendor/github.com/go-jose/go-jose/v3/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/symmetric.go @@ -40,12 +40,17 @@ var RandReader = rand.Reader const ( // RFC7518 recommends a minimum of 1,000 iterations: - // https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // - https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // // NIST recommends a minimum of 10,000: - // https://pages.nist.gov/800-63-3/sp800-63b.html - // 1Password uses 100,000: - // https://support.1password.com/pbkdf2/ - defaultP2C = 100000 + // - https://pages.nist.gov/800-63-3/sp800-63b.html + // + // 1Password increased in 2023 from 100,000 to 650,000: + // - https://support.1password.com/pbkdf2/ + // + // OWASP recommended 600,000 in Dec 2022: + // - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + defaultP2C = 600000 // Default salt size: 128 bits defaultP2SSize = 16 ) diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml index e24a6c14..22f8d21c 100644 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -4,53 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 40 - gocognit: - min-complexity: 40 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 150 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: enable-all: true disable: - maligned + - unparam - lll - - gochecknoglobals - gochecknoinits - # scopelint is useful, but also reports false positives - # that unfortunately can't be disabled. So we disable the - # linter rather than changing code that works. 
- # see: https://github.com/kyoh86/scopelint/issues/4 - - scopelint + - gochecknoglobals + - funlen - godox - gocognit - #- whitespace + - whitespace - wsl - - funlen - - testpackage - wrapcheck - #- nlreturn + - testpackage + - nlreturn - gomnd - - goerr113 - exhaustivestruct - #- errorlint - #- nestif - - gofumpt + - goerr113 + - errorlint + - nestif - godot - - gci - - dogsled + - gofumpt - paralleltest - tparallel - thelper - ifshort - - forbidigo - - cyclop - - varnamelen - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md index aad6da10..e005d4b3 100644 --- a/vendor/github.com/go-openapi/analysis/README.md +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -1,8 +1,5 @@ -# OpenAPI initiative analysis +# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) -[![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) -[![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) -[![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) @@ -13,12 +10,12 @@ A foundational library to analyze an OAI specification document for easier reaso ## What's inside? -* A analyzer providing methods to walk the functional content of a specification +* An analyzer providing methods to walk the functional content of a specification * A spec flattener producing a self-contained document bundle, while preserving `$ref`s * A spec merger ("mixin") to merge several spec documents into a primary spec * A spec "fixer" ensuring that response descriptions are non empty -[Documentation](https://godoc.org/github.com/go-openapi/analysis) +[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis) ## FAQ @@ -28,4 +25,3 @@ A foundational library to analyze an OAI specification document for easier reaso > This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
-> diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml deleted file mode 100644 index c2f6fd73..00000000 --- a/vendor/github.com/go-openapi/analysis/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\analysis -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.16 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go index d5294c09..e8d9f9b1 100644 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -16,27 +16,27 @@ Package analysis provides methods to work with a Swagger specification document from package go-openapi/spec. -Analyzing a specification +## Analyzing a specification An analysed specification object (type Spec) provides methods to work with swagger definition. -Flattening or expanding a specification +## Flattening or expanding a specification Flattening a specification bundles all remote $ref in the main spec document. Depending on flattening options, additional preprocessing may take place: - full flattening: replacing all inline complex constructs by a named entry in #/definitions - expand: replace all $ref's in the document by their expanded content -Merging several specifications +## Merging several specifications Mixin several specifications merges all Swagger constructs, and warns about found conflicts. -Fixing a specification +## Fixing a specification Unmarshalling a specification with golang json unmarshalling may lead to some unwanted result on present but empty fields. -Analyzing a Swagger schema +## Analyzing a Swagger schema Swagger schemas are analyzed to determine their complexity and qualify their content. */ diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go index 0576220f..ebedcc9d 100644 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -62,28 +62,26 @@ func newContext() *context { // // There is a minimal and a full flattening mode. 
// -// // Minimally flattening a spec means: -// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left -// unscathed) -// - Importing external (http, file) references so they become internal to the document -// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers -// like "$ref": "#/definitions/myObject/allOfs/1") +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") // // A minimally flattened spec thus guarantees the following properties: -// - all $refs point to a local definition (i.e. '#/definitions/...') -// - definitions are unique +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique // // NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they // represent a complex schema or express commonality in the spec. // Otherwise, they are simply expanded. // Self-referencing JSON pointers cannot resolve to a type and trigger an error. // -// // Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. // // Fully flattening a spec means: -// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. // // By complex, we mean every JSON object with some properties. // Arrays, when they do not define a tuple, @@ -93,22 +91,21 @@ func newContext() *context { // have been created. // // Available flattening options: -// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched -// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) -// - Verbose: croaks about name conflicts detected -// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening +// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched +// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) +// - Verbose: croaks about name conflicts detected +// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening // // NOTE: expansion removes all $ref save circular $ref, which remain in place // // TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a -// x-go-name extension -// - LiftAllOfs: -// - limit the flattening of allOf members when simple objects -// - merge allOf with validation only -// - merge allOf with extensions only -// - ... -// +// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a +// x-go-name extension +// - LiftAllOfs: +// - limit the flattening of allOf members when simple objects +// - merge allOf with validation only +// - merge allOf with extensions only +// - ... 
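A hedged sketch of driving the flattener with the options documented above; loading the spec via the loads package and the file path are illustrative assumptions, not part of this patch:

    package main

    import (
        "log"

        "github.com/go-openapi/analysis"
        "github.com/go-openapi/loads"
    )

    func main() {
        doc, err := loads.Spec("./swagger.yml") // placeholder spec document
        if err != nil {
            log.Fatal(err)
        }
        if err := analysis.Flatten(analysis.FlattenOpts{
            Spec:         analysis.New(doc.Spec()),
            BasePath:     doc.SpecFilePath(),
            Minimal:      true, // stop after minimal $ref processing
            RemoveUnused: true, // now loops until no unused definition remains
        }); err != nil {
            log.Fatal(err)
        }
    }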
func Flatten(opts FlattenOpts) error { debugLog("FlattenOpts: %#v", opts) @@ -270,6 +267,12 @@ func nameInlinedSchemas(opts *FlattenOpts) error { } func removeUnused(opts *FlattenOpts) { + for removeUnusedSinglePass(opts) { + // continue until no unused definition remains + } +} + +func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) { expected := make(map[string]struct{}) for k := range opts.Swagger().Definitions { expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} @@ -280,6 +283,7 @@ func removeUnused(opts *FlattenOpts) { } for k := range expected { + hasRemoved = true debugLog("removing unused definition %s", path.Base(k)) if opts.Verbose { log.Printf("info: removing unused definition: %s", path.Base(k)) @@ -288,6 +292,8 @@ func removeUnused(opts *FlattenOpts) { } opts.Spec.reload() // re-analyze + + return hasRemoved } func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { @@ -334,7 +340,7 @@ func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) err } // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts)) debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) opts.flattenContext.resolved[refStr] = newName @@ -488,9 +494,9 @@ func stripPointersAndOAIGen(opts *FlattenOpts) error { // stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. // // A dedupe is deemed unnecessary whenever: -// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) -// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to -// the first parent. +// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) +// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to +// the first parent. // // This function returns true whenever it re-inlined a complex schema, so the caller may chose to iterate // pointer and name resolution again. 
@@ -652,6 +658,7 @@ func namePointers(opts *FlattenOpts) error {
 	refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
 	for k, ref := range opts.Spec.references.allRefs {
+		debugLog("name pointers: %q => %#v", k, ref)
 		if path.Dir(ref.String()) == definitionsPath {
 			// this is a ref to a top-level definition: ok
 			continue
@@ -769,6 +776,10 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
 	// identifying the edge case when the namer did nothing because we point to a non-schema object
 	// no definition is created and we expand the $ref for all callers
+	debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t",
+		asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(),
+	)
+
 	if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() {
 		debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String())
 		if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil {
@@ -791,6 +802,7 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
 		return nil
 	}
 
+	// everything that is a simple schema and not factorizable is expanded
 	debugLog("expand JSON pointer for key=%s", key)
 
 	if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil {
diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go
index 3ad2ccfb..c7d7938e 100644
--- a/vendor/github.com/go-openapi/analysis/flatten_name.go
+++ b/vendor/github.com/go-openapi/analysis/flatten_name.go
@@ -33,12 +33,14 @@ func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *Ana
 	}
 
 	// create unique name
-	newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))
+	mangle := mangler(isn.opts)
+	newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name))
 
 	// clone schema
 	sch := schutils.Clone(schema)
 
 	// replace values on schema
+	debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName)
 	if err := replace.RewriteSchemaToRef(isn.Spec, key,
 		spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
 		return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err)
@@ -149,13 +151,15 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
 		startIndex int
 	)
 
-	if parts.IsOperation() {
+	switch {
+	case parts.IsOperation():
 		baseNames, startIndex = namesForOperation(parts, operations)
-	}
-
-	// definitions
-	if parts.IsDefinition() {
+	case parts.IsDefinition():
 		baseNames, startIndex = namesForDefinition(parts)
+	default:
+		// this is a non-standard pointer: build a name by concatenating its parts
+		baseNames = [][]string{parts}
+		startIndex = len(baseNames) + 1
 	}
 
 	result := make([]string, 0, len(baseNames))
@@ -169,6 +173,7 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
 	}
 
 	sort.Strings(result)
+	debugLog("names from parts: %v => %v", parts, result)
 
 	return result
 }
@@ -256,10 +261,20 @@ func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
 	}
 }
 
-func nameFromRef(ref spec.Ref) string {
+func mangler(o *FlattenOpts) func(string) string {
+	if o.KeepNames {
+		return func(in string) string { return in }
+	}
+
+	return swag.ToJSONName
+}
+
+func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
+	mangle := mangler(o)
+
 	u := ref.GetURL()
 	if u.Fragment != "" {
-		return swag.ToJSONName(path.Base(u.Fragment))
+		return mangle(path.Base(u.Fragment))
 	}
 
 	if u.Path != "" {
@@ -267,19 +282,19 @@ func nameFromRef(ref spec.Ref) string {
 		if bn != "" && bn != "/" {
 			ext := path.Ext(bn)
 			if ext != "" {
-				return swag.ToJSONName(bn[:len(bn)-len(ext)])
+				return mangle(bn[:len(bn)-len(ext)])
 			}
 
-			return swag.ToJSONName(bn)
+			return mangle(bn)
 		}
 	}
 
-	return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " "))
+	return mangle(strings.ReplaceAll(u.Host, ".", " "))
 }
 
 // GenLocation indicates from which section of the specification (models or operations) a definition has been created.
 //
-// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
 // for information only.
 func GenLocation(parts sortref.SplitKey) string {
 	switch {
diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go
index c5bb97b0..c943fe1e 100644
--- a/vendor/github.com/go-openapi/analysis/flatten_options.go
+++ b/vendor/github.com/go-openapi/analysis/flatten_options.go
@@ -26,6 +26,7 @@ type FlattenOpts struct {
 	Verbose         bool // enable some reporting on possible name conflicts detected
 	RemoveUnused    bool // When true, remove unused parameters, responses and definitions after expansion/flattening
 	ContinueOnError bool // Continue when spec expansion issues are found
+	KeepNames       bool // Do not attempt to jsonify names from references when flattening
 
 	/* Extra keys */
 	_ struct{} // require keys
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
index 26c2a05a..c0f43e72 100644
--- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
@@ -1,6 +1,7 @@
 package replace
 
 import (
+	"encoding/json"
 	"fmt"
 	"net/url"
 	"os"
@@ -40,6 +41,8 @@ func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
 		if refable.Schema != nil {
 			refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
 		}
+	case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{}
+		return rewriteParentRef(sp, key, ref)
 	default:
 		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
 	}
@@ -120,6 +123,9 @@ func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error {
 	case spec.SchemaProperties:
 		container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
 
+	case *interface{}:
+		*container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
 	// NOTE: no case for *spec.SchemaOrBool, since the parent in that case is a *Schema
 
 	default:
@@ -318,8 +324,8 @@ type DeepestRefResult struct {
 }
 
 // DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions.
-// - if no definition is found, returns the deepest ref.
-// - pointers to external files are expanded
+//   - if no definition is found, returns the deepest ref.
+//   - pointers to external files are expanded
 //
 // NOTE: all external $ref's are assumed to be already expanded at this stage.
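For the KeepNames option introduced above, a small sketch (the sample name is invented) contrasting the default swag.ToJSONName mangling with the identity function that KeepNames selects:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

// pick mirrors the mangler() helper above: identity when names are kept,
// swag.ToJSONName otherwise.
func pick(keepNames bool) func(string) string {
	if keepNames {
		return func(in string) string { return in }
	}
	return swag.ToJSONName
}

func main() {
	name := "My-Definition" // hypothetical name taken from a $ref
	fmt.Println(pick(false)(name)) // myDefinition
	fmt.Println(pick(true)(name))  // My-Definition
}
```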
func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) {
@@ -385,8 +391,9 @@ DOWNREF:
 			err := asSchema.UnmarshalJSON(asJSON)
 			if err != nil {
 				return nil,
-					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
-						currentRef.String(), value)
+					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)",
+						currentRef.String(), value, err,
+					)
 			}
 
 			warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
@@ -402,8 +409,9 @@ DOWNREF:
 			var asSchema spec.Schema
 			if err := asSchema.UnmarshalJSON(asJSON); err != nil {
 				return nil,
-					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
-						currentRef.String(), value)
+					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)",
+						currentRef.String(), value, err,
+					)
 			}
 
 			warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
@@ -414,9 +422,25 @@ DOWNREF:
 			currentRef = asSchema.Ref
 
 		default:
-			return nil,
-				fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
-					currentRef.String(), value)
+			// fallback: attempt to resolve the pointer as a schema
+			if refable == nil {
+				break DOWNREF
+			}
+
+			asJSON, _ := json.Marshal(refable)
+			var asSchema spec.Schema
+			if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+				return nil,
+					fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
+						currentRef.String(), value, err,
+					)
+			}
+			warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
+
+			if asSchema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = asSchema.Ref
 		}
 	}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
index 18e552ea..ac80fc2e 100644
--- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
@@ -69,7 +69,7 @@ func KeyParts(key string) SplitKey {
 	return res
 }
 
-// SplitKey holds of the parts of a /-separated key, soi that their location may be determined.
+// SplitKey holds the parts of a /-separated key, so that their location may be determined.
 type SplitKey []string
 
 // IsDefinition is true when the split key is in the #/definitions section of a spec
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
index b2530526..57cb7a9d 100644
--- a/vendor/github.com/go-openapi/analysis/mixin.go
+++ b/vendor/github.com/go-openapi/analysis/mixin.go
@@ -367,7 +367,7 @@ func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
 	return skipped
 }
 
-// nolint: unparam
+//nolint:unparam
 func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
 	if primary.Description == "" {
 		primary.Description = m.Description
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
index fc055095..95f35dc7 100644
--- a/vendor/github.com/go-openapi/analysis/schema.go
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -247,10 +247,10 @@ func (a *AnalyzedSchema) isArrayType() bool {
 // isAnalyzedAsComplex determines if an analyzed schema is eligible for flattening (i.e. it is "complex").
 //
 // Complex means the schema is none of:
-// - a simple type (primitive)
-// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
-// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
-// generate a definition)
+//   - a simple type (primitive)
+//   - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+//   - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+//     generate a definition)
 func (a *AnalyzedSchema) isAnalyzedAsComplex() bool {
 	return !a.IsSimpleSchema && !a.IsArray && !a.IsMap
 }
diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml
index 4e1fc0c7..cf88ead3 100644
--- a/vendor/github.com/go-openapi/errors/.golangci.yml
+++ b/vendor/github.com/go-openapi/errors/.golangci.yml
@@ -4,45 +4,59 @@ linters-settings:
   golint:
     min-confidence: 0
   gocyclo:
-    min-complexity: 30
+    min-complexity: 45
   maligned:
     suggest-new: true
   dupl:
-    threshold: 100
+    threshold: 200
   goconst:
     min-len: 2
-    min-occurrences: 4
+    min-occurrences: 3
+
 linters:
   enable-all: true
   disable:
+    - errname # this repo doesn't follow the convention advised by this linter
     - maligned
+    - unparam
     - lll
+    - gochecknoinits
     - gochecknoglobals
+    - funlen
     - godox
     - gocognit
     - whitespace
     - wsl
-    - funlen
-    - gochecknoglobals
-    - gochecknoinits
-    - scopelint
     - wrapcheck
-    - exhaustivestruct
-    - exhaustive
-    - nlreturn
     - testpackage
-    - gci
-    - gofumpt
-    - goerr113
+    - nlreturn
     - gomnd
-    - tparallel
+    - exhaustivestruct
+    - goerr113
+    - errorlint
     - nestif
     - godot
-    - errorlint
+    - gofumpt
    - paralleltest
    - tparallel
-    - cyclop
-    - errname
-    - varnamelen
+    - thelper
+    - ifshort
     - exhaustruct
-    - maintidx
+    - varnamelen
+    - gci
+    - depguard
+    - errchkjson
+    - inamedparam
+    - nonamedreturns
+    - musttag
+    - ireturn
+    - forcetypeassert
+    - cyclop
+    # deprecated linters
+    - deadcode
+    - interfacer
+    - scopelint
+    - varcheck
+    - structcheck
+    - golint
+    - nosnakecase
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md
index 4aac049e..6d57ea55 100644
--- a/vendor/github.com/go-openapi/errors/README.md
+++ b/vendor/github.com/go-openapi/errors/README.md
@@ -1,11 +1,8 @@
-# OpenAPI errors
+# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors)
-[![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors)
-[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors)
 [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
 [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE)
 [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors)
-[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com)
 [![Go Report
Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index c13f3435..5320cb96 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -55,9 +55,15 @@ func (a apiError) MarshalJSON() ([]byte, error) { // New creates a new API error with a code and a message func New(code int32, message string, args ...interface{}) Error { if len(args) > 0 { - return &apiError{code, fmt.Sprintf(message, args...)} + return &apiError{ + code: code, + message: fmt.Sprintf(message, args...), + } + } + return &apiError{ + code: code, + message: message, } - return &apiError{code, message} } // NotFound creates a new not found error @@ -130,10 +136,14 @@ func flattenComposite(errs *CompositeError) *CompositeError { // MethodNotAllowed creates a new method not allowed error func MethodNotAllowed(requested string, allow []string) Error { msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} + return &MethodNotAllowedError{ + code: http.StatusMethodNotAllowed, + Allowed: allow, + message: msg, + } } -// ServeError the error handler interface implementation +// ServeError implements the http error handler interface func ServeError(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Set("Content-Type", "application/json") switch e := err.(type) { diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go index da5f6c78..cf7ac2ed 100644 --- a/vendor/github.com/go-openapi/errors/schema.go +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -120,6 +120,10 @@ func (c *CompositeError) Error() string { return c.message } +func (c *CompositeError) Unwrap() []error { + return c.Errors +} + // MarshalJSON implements the JSON encoding interface func (c CompositeError) MarshalJSON() ([]byte, error) { return json.Marshal(map[string]interface{}{ @@ -133,7 +137,7 @@ func (c CompositeError) MarshalJSON() ([]byte, error) { func CompositeValidationError(errors ...error) *CompositeError { return &CompositeError{ code: CompositeErrorCode, - Errors: append([]error{}, errors...), + Errors: append(make([]error, 0, len(errors)), errors...), message: "validation failure list", } } diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 00000000..22f8d21c --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - 
musttag
+    - ireturn
+    - forcetypeassert
+    - cyclop
+    # deprecated linters
+    - deadcode
+    - interfacer
+    - scopelint
+    - varcheck
+    - structcheck
+    - golint
+    - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788af..0108f1d5 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
 
-[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer)
 An implementation of JSON Pointer - Go language
 
 ## Status
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index de60dc7d..d975773d 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -110,19 +110,39 @@ func SetForToken(document any, decodedToken string, value any) (any, error) {
 	return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
 }
 
+func isNil(input any) bool {
+	if input == nil {
+		return true
+	}
+
+	kind := reflect.TypeOf(input).Kind()
+	switch kind { //nolint:exhaustive
+	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+		return reflect.ValueOf(input).IsNil()
+	default:
+		return false
+	}
+}
+
 func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
 	rValue := reflect.Indirect(reflect.ValueOf(node))
 	kind := rValue.Kind()
+	if isNil(node) {
+		return nil, kind, fmt.Errorf("nil value has no field %q", decodedToken)
+	}
 
-	if rValue.Type().Implements(jsonPointableType) {
-		r, err := node.(JSONPointable).JSONLookup(decodedToken)
+	switch typed := node.(type) {
+	case JSONPointable:
+		r, err := typed.JSONLookup(decodedToken)
 		if err != nil {
 			return nil, kind, err
 		}
 
 		return r, kind, nil
+	case *any: // case of a pointer to an interface, which is not resolved by reflect.Indirect
+		return getSingleImpl(*typed, decodedToken, nameProvider)
 	}
 
-	switch kind {
+	switch kind { //nolint:exhaustive
 	case reflect.Struct:
 		nm, ok := 
nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -170,7 +190,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -231,8 +251,7 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -405,11 +424,11 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { case json.Delim: switch tk { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } } @@ -435,20 +454,21 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { if err != nil { return 0, err } - switch tk := tk.(type) { - case json.Delim: - switch tk { + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } } } } + if !dec.More() { return 0, fmt.Errorf("token reference %q not found", decodedToken) } @@ -456,27 +476,27 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { } // drainSingle drains a single level of object or array. -// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
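As a usage sketch for the pointer-resolution code patched above (the document literal is invented): resolving a token that contains a reserved character goes through Escape, whose ~0/~1 rewriting appears just below in this diff:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]interface{}{
		"a": map[string]interface{}{
			"b~c": []interface{}{"x", "y"},
		},
	}

	// "~" must appear as "~0" (and "/" as "~1") inside a pointer token.
	p, err := jsonpointer.New("/a/" + jsonpointer.Escape("b~c") + "/1")
	if err != nil {
		panic(err)
	}

	v, _, err := p.Get(doc)
	fmt.Println(v, err) // y <nil>
}
```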
func drainSingle(dec *json.Decoder) error { for dec.More() { tk, err := dec.Token() if err != nil { return err } - switch tk := tk.(type) { - case json.Delim: - switch tk { + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return err } } } } + // Consumes the ending delim if _, err := dec.Token(); err != nil { return err @@ -498,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 013fc194..22f8d21c 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,50 +1,61 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 - paralleltest: - ignore-missing: true + min-occurrences: 3 + linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - varcheck - - interfacer - - deadcode - - golint + - gofumpt + - paralleltest + - tparallel + - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck - structcheck + - golint - nosnakecase - - varnamelen - - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index b94753aa..c7fc2049 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,15 +1,19 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build 
Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. Stable API ## Dependencies -https://github.com/go-openapi/jsonpointer +* https://github.com/go-openapi/jsonpointer ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml index d48b4a51..22f8d21c 100644 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -4,41 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: enable-all: true disable: - maligned + - unparam - lll - - gochecknoglobals - gochecknoinits + - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint + - gofumpt - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index df1f6264..6b8307c8 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,4 +1,4 @@ -# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![Actions/Go Test 
Status](https://github.com/go-openapi/loads/workflows/Go%20Test/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test")
+# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads)
 
 [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
 [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads)
diff --git a/vendor/github.com/go-openapi/loads/TODO.md b/vendor/github.com/go-openapi/loads/TODO.md
new file mode 100644
index 00000000..103937ac
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/TODO.md
@@ -0,0 +1,3 @@
+[x] why not filepath in JSONDoc()
+[] integration tests package
+[] relint
diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go
index 3046da4c..5bcaef5d 100644
--- a/vendor/github.com/go-openapi/loads/doc.go
+++ b/vendor/github.com/go-openapi/loads/doc.go
@@ -12,10 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-/*
-Package loads provides document loading methods for swagger (OAI) specifications.
-
-It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
-
-*/
+// Package loads provides document loading methods for swagger (OAI) specifications.
+//
+// It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
 package loads
diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go
index 44bd32b5..705275cb 100644
--- a/vendor/github.com/go-openapi/loads/loaders.go
+++ b/vendor/github.com/go-openapi/loads/loaders.go
@@ -86,7 +86,7 @@ func (l *loader) Load(path string) (json.RawMessage, error) {
 		return nil, erp
 	}
 
-	var lastErr error = errors.New("no loader matched") // default error if no match was found
+	lastErr := errors.New("no loader matched") // default error if no match was found
 	for ldr := l; ldr != nil; ldr = ldr.Next {
 		if ldr.Match != nil && !ldr.Match(path) {
 			continue
@@ -118,9 +118,8 @@ func JSONDoc(path string) (json.RawMessage, error) {
 // This sets the configuration at the package level.
 //
 // NOTE:
-//   * this updates the default loader used by github.com/go-openapi/spec
-//   * since this sets package level globals, you shouln't call this concurrently
-//
+//   - this updates the default loader used by github.com/go-openapi/spec
+//   - since this sets package level globals, you shouldn't call this concurrently
 func AddLoader(predicate DocMatcher, load DocLoader) {
 	loaders = loaders.WithHead(&loader{
 		DocLoaderWithMatch: DocLoaderWithMatch{
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
index 93c8d4b8..c9039cd5 100644
--- a/vendor/github.com/go-openapi/loads/spec.go
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -38,8 +38,8 @@ type Document struct {
 	specFilePath string
 	origSpec     *spec.Swagger
 	schema       *spec.Schema
-	raw          json.RawMessage
 	pathLoader   *loader
+	raw          json.RawMessage
 }
 
 // JSONSpec loads a spec from a json document
@@ -49,7 +49,14 @@ func JSONSpec(path string, options ...LoaderOption) (*Document, error) {
 		return nil, err
 	}
 	// convert to json
-	return Analyzed(data, "", options...)
+	doc, err := Analyzed(data, "", options...)
+	if err != nil {
+		return nil, err
+	}
+
+	doc.specFilePath = path
+
+	return doc, nil
 }
 
 // Embedded returns a Document based on embedded specs. No analysis is required
@@ -71,7 +78,6 @@ func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, e
 
 // Spec loads a new spec document from a local or remote path
 func Spec(path string, options ...LoaderOption) (*Document, error) {
-
 	ldr := loaderFromOptions(options)
 
 	b, err := ldr.Load(path)
@@ -84,12 +90,10 @@ func Spec(path string, options ...LoaderOption) (*Document, error) {
 		return nil, err
 	}
 
-	if document != nil {
-		document.specFilePath = path
-		document.pathLoader = ldr
-	}
+	document.specFilePath = path
+	document.pathLoader = ldr
 
-	return document, err
+	return document, nil
 }
 
 // Analyzed creates a new analyzed spec document for a root json.RawMessage.
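A short sketch (placeholder path, not part of the patch) of the Document round trip these hunks adjust: JSONSpec and Spec now record the file path, which Expanded then uses as its default relative base for resolving $ref:

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	doc, err := loads.Spec("./swagger.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// $refs are resolved relative to the recorded spec file path.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("swagger %s, %d definitions", expanded.Version(), len(expanded.Spec().Definitions))
}
```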
@@ -117,7 +121,7 @@ func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*D } d := &Document{ - Analyzer: analysis.New(swspec), + Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc schema: spec.MustLoadSwagger20Schema(), spec: swspec, raw: raw, @@ -152,9 +156,8 @@ func trimData(in json.RawMessage) (json.RawMessage, error) { return d, nil } -// Expanded expands the ref fields in the spec document and returns a new spec document +// Expanded expands the $ref fields in the spec document and returns a new spec document func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - swspec := new(spec.Swagger) if err := json.Unmarshal(d.raw, swspec); err != nil { return nil, err @@ -163,6 +166,9 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { var expandOptions *spec.ExpandOptions if len(options) > 0 { expandOptions = options[0] + if expandOptions.RelativeBase == "" { + expandOptions.RelativeBase = d.specFilePath + } } else { expandOptions = &spec.ExpandOptions{ RelativeBase: d.specFilePath, @@ -194,7 +200,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { return dd, nil } -// BasePath the base path for this spec +// BasePath the base path for the API specified by this spec func (d *Document) BasePath() string { return d.spec.BasePath } @@ -242,8 +248,11 @@ func (d *Document) ResetDefinitions() *Document { // Pristine creates a new pristine document instance based on the input data func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) + raw, _ := json.Marshal(d.Spec()) + dd, _ := Analyzed(raw, d.Version()) dd.pathLoader = d.pathLoader + dd.specFilePath = d.specFilePath + return dd } diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml index b1aa7928..1c75557b 100644 --- a/vendor/github.com/go-openapi/runtime/.golangci.yml +++ b/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -1,44 +1,62 @@ linters-settings: govet: - # Using err repeatedly considered as shadowing. 
- check-shadowing: false + check-shadowing: true golint: min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 + linters: + enable-all: true disable: + - nilerr # nilerr crashes on this repo - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - noctx + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode - interfacer - - nilerr + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md index 5b1ec649..b07e0ad9 100644 --- a/vendor/github.com/go-openapi/runtime/README.md +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -1,7 +1,10 @@ -# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) -# golang Open-API toolkit - runtime +# go OpenAPI toolkit runtime -The runtime component for use in codegeneration or as untyped usage. +The runtime component for use in code generation or as untyped usage. 
diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go index 6eb6ceb5..0a6b8ec6 100644 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -52,14 +52,15 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { return errors.New("ByteStreamConsumer requires a reader") // early exit } - close := defaultCloser + closer := defaultCloser if vals.Close { if cl, ok := reader.(io.Closer); ok { - close = cl.Close + closer = cl.Close } } - //nolint:errcheck // closing a reader wouldn't fail. - defer close() + defer func() { + _ = closer() + }() if wrtr, ok := data.(io.Writer); ok { _, err := io.Copy(wrtr, reader) @@ -109,14 +110,15 @@ func ByteStreamProducer(opts ...byteStreamOpt) Producer { if writer == nil { return errors.New("ByteStreamProducer requires a writer") // early exit } - close := defaultCloser + closer := defaultCloser if vals.Close { if cl, ok := writer.(io.Closer); ok { - close = cl.Close + closer = cl.Close } } - //nolint:errcheck // TODO: closing a writer would fail. - defer close() + defer func() { + _ = closer() + }() if rc, ok := data.(io.ReadCloser); ok { defer rc.Close() diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go index fa21eacf..5a5d6356 100644 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -30,12 +30,12 @@ type ClientOperation struct { AuthInfo ClientAuthInfoWriter Params ClientRequestWriter Reader ClientResponseReader - Context context.Context + Context context.Context //nolint:containedctx // we precisely want this type to contain the request context Client *http.Client } // A ClientTransport implementor knows how to submit Request objects to some destination type ClientTransport interface { - //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) Submit(*ClientOperation) (interface{}, error) } diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go index d4d2b58f..4ebb2dea 100644 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -37,8 +37,8 @@ type ClientRequestWriter interface { } // ClientRequest is an interface for things that know how to -// add information to a swagger client request -type ClientRequest interface { +// add information to a swagger client request. 
+type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters SetHeaderParam(string, ...string) error GetHeaderParams() http.Header diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go index 078fda17..9e3e1ecb 100644 --- a/vendor/github.com/go-openapi/runtime/request.go +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -16,6 +16,8 @@ package runtime import ( "bufio" + "context" + "errors" "io" "net/http" "strings" @@ -96,10 +98,16 @@ func (p *peekingReader) Read(d []byte) (int, error) { if p == nil { return 0, io.EOF } + if p.underlying == nil { + return 0, io.ErrUnexpectedEOF + } return p.underlying.Read(d) } func (p *peekingReader) Close() error { + if p.underlying == nil { + return errors.New("reader already closed") + } p.underlying = nil if p.orig != nil { return p.orig.Close() @@ -107,9 +115,11 @@ func (p *peekingReader) Close() error { return nil } -// JSONRequest creates a new http request with json headers set +// JSONRequest creates a new http request with json headers set. +// +// It uses context.Background. func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) + req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore index dd91ed6a..f47cb204 100644 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -1,2 +1 @@ -secrets.yml -coverage.out +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index 835d55e7..22f8d21c 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -11,7 +11,7 @@ linters-settings: threshold: 200 goconst: min-len: 2 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -40,3 +40,22 @@ linters: - tparallel - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 18782c6d..822172ff 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -1,8 +1,5 @@ -# OAI object model +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) -[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) - -[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![Go 
Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml deleted file mode 100644 index 09035939..00000000 --- a/vendor/github.com/go-openapi/spec/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\spec -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index afc83850..00000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-bindata. DO NOT EDIT. 
-// sources: -// schemas/jsonschema-draft-04.json (4.357kB) -// schemas/v2/schema.json (40.248kB) - -package spec - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo - digest [sha256.Size]byte -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04Json = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x
77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") - -func jsonschemaDraft04JsonBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04Json, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04Json() (*asset, error) { - bytes, err := jsonschemaDraft04JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} - return a, nil -} - -var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\
[… remainder of the go-bindata hex-encoded, gzip-compressed payload for v2/schema.json elided …]\xd2\x32\x5a\x28\x38\x9d\x00\x00")
-
-func v2SchemaJsonBytes() ([]byte, error) {
-	return bindataRead(
-		_v2SchemaJson,
-		"v2/schema.json",
-	)
-}
-
-func v2SchemaJson() (*asset, error) {
-	bytes, err := v2SchemaJsonBytes()
-	if err != nil {
-		return nil, err
-	}
-
-	info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}}
-	return a, nil
-}
-
-// Asset loads and returns the asset for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func Asset(name string) ([]byte, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
-		}
-		return a.bytes, nil
-	}
-	return nil, fmt.Errorf("Asset %s not found", name)
-}
-
-// AssetString returns the asset contents as a string (instead of a []byte).
-func AssetString(name string) (string, error) {
-	data, err := Asset(name)
-	return string(data), err
-}
-
-// MustAsset is like Asset but panics when Asset would return an error.
-// It simplifies safe initialization of global variables.
-func MustAsset(name string) []byte {
-	a, err := Asset(name)
-	if err != nil {
-		panic("asset: Asset(" + name + "): " + err.Error())
-	}
-
-	return a
-}
-
-// MustAssetString is like AssetString but panics when Asset would return an
-// error. It simplifies safe initialization of global variables.
-func MustAssetString(name string) string {
-	return string(MustAsset(name))
-}
-
-// AssetInfo loads and returns the asset info for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func AssetInfo(name string) (os.FileInfo, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
-		}
-		return a.info, nil
-	}
-	return nil, fmt.Errorf("AssetInfo %s not found", name)
-}
-
-// AssetDigest returns the digest of the file with the given name. It returns an
-// error if the asset could not be found or the digest could not be loaded.
-func AssetDigest(name string) ([sha256.Size]byte, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
-		}
-		return a.digest, nil
-	}
-	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
-}
-
-// Digests returns a map of all known files and their checksums.
-func Digests() (map[string][sha256.Size]byte, error) {
-	mp := make(map[string][sha256.Size]byte, len(_bindata))
-	for name := range _bindata {
-		a, err := _bindata[name]()
-		if err != nil {
-			return nil, err
-		}
-		mp[name] = a.digest
-	}
-	return mp, nil
-}
-
-// AssetNames returns the names of the assets.
-func AssetNames() []string {
-	names := make([]string, 0, len(_bindata))
-	for name := range _bindata {
-		names = append(names, name)
-	}
-	return names
-}
-
-// _bindata is a table, holding each asset generator, mapped to its name.
-var _bindata = map[string]func() (*asset, error){
-	"jsonschema-draft-04.json": jsonschemaDraft04Json,
-
-	"v2/schema.json": v2SchemaJson,
-}
-
-// AssetDir returns the file names below a certain
-// directory embedded in the file by go-bindata.
-// For example if you run go-bindata on data/... and data contains the
-// following hierarchy:
-//	data/
-//	  foo.txt
-//	  img/
-//	    a.png
-//	    b.png
-// then AssetDir("data") would return []string{"foo.txt", "img"},
-// AssetDir("data/img") would return []string{"a.png", "b.png"},
-// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
-// AssetDir("") will return []string{"data"}.
-func AssetDir(name string) ([]string, error) {
-	node := _bintree
-	if len(name) != 0 {
-		canonicalName := strings.Replace(name, "\\", "/", -1)
-		pathList := strings.Split(canonicalName, "/")
-		for _, p := range pathList {
-			node = node.Children[p]
-			if node == nil {
-				return nil, fmt.Errorf("Asset %s not found", name)
-			}
-		}
-	}
-	if node.Func != nil {
-		return nil, fmt.Errorf("Asset %s not found", name)
-	}
-	rv := make([]string, 0, len(node.Children))
-	for childName := range node.Children {
-		rv = append(rv, childName)
-	}
-	return rv, nil
-}
-
-type bintree struct {
-	Func     func() (*asset, error)
-	Children map[string]*bintree
-}
-
-var _bintree = &bintree{nil, map[string]*bintree{
-	"jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}},
-	"v2": {nil, map[string]*bintree{
-		"schema.json": {v2SchemaJson, map[string]*bintree{}},
-	}},
-}}
-
-// RestoreAsset restores an asset under the given directory.
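The AssetDigest/Digests helpers deleted above precomputed a SHA-256 checksum per embedded file. Nothing in this patch re-creates them, but the same information can be derived on demand from an embed.FS. The following standalone sketch is not part of the patch; the schemas/ directory name is hypothetical, and the program only builds if such a directory exists beside the source file:

package main

import (
	"crypto/sha256"
	"embed"
	"fmt"
	"io/fs"
)

// Embed the schema files at compile time; the build fails if the pattern
// matches nothing, unlike go-bindata's run-time lookup tables.
//
//go:embed schemas
var assets embed.FS

// digests walks the embedded tree and computes a SHA-256 checksum per file,
// standing in for go-bindata's generated Digests()/AssetDigest helpers.
func digests() (map[string][sha256.Size]byte, error) {
	mp := make(map[string][sha256.Size]byte)
	err := fs.WalkDir(assets, "schemas", func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		b, err := assets.ReadFile(path)
		if err != nil {
			return err
		}
		mp[path] = sha256.Sum256(b)
		return nil
	})
	return mp, err
}

func main() {
	mp, err := digests()
	if err != nil {
		panic(err)
	}
	for name, sum := range mp {
		fmt.Printf("%s  %x\n", name, sum)
	}
}

Because the checksums are computed from the embedded bytes themselves, there is no generated digest table left to drift out of sync with the data.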
-func RestoreAsset(dir, name string) error {
-	data, err := Asset(name)
-	if err != nil {
-		return err
-	}
-	info, err := AssetInfo(name)
-	if err != nil {
-		return err
-	}
-	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
-	if err != nil {
-		return err
-	}
-	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
-	if err != nil {
-		return err
-	}
-	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
-}
-
-// RestoreAssets restores an asset under the given directory recursively.
-func RestoreAssets(dir, name string) error {
-	children, err := AssetDir(name)
-	// File
-	if err != nil {
-		return RestoreAsset(dir, name)
-	}
-	// Dir
-	for _, child := range children {
-		err = RestoreAssets(dir, filepath.Join(name, child))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func _filePath(dir, name string) string {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
-}
diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go
new file mode 100644
index 00000000..1f428475
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/embed.go
@@ -0,0 +1,17 @@
+package spec
+
+import (
+	"embed"
+	"path"
+)
+
+//go:embed schemas/*.json schemas/*/*.json
+var assets embed.FS
+
+func jsonschemaDraft04JSONBytes() ([]byte, error) {
+	return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json"))
+}
+
+func v2SchemaJSONBytes() ([]byte, error) {
+	return assets.ReadFile(path.Join("schemas", "v2", "schema.json"))
+}
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
index d4ea889d..fae9c124 100644
--- a/vendor/github.com/go-openapi/spec/expander.go
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -27,7 +27,6 @@ import (
 // all relative $ref's will be resolved from there.
 //
 // PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable.
-//
 type ExpandOptions struct {
 	RelativeBase string // the path to the root document to expand. This is a file, not a directory
 	SkipSchemas  bool   // do not expand schemas, just paths, parameters and responses
@@ -103,15 +102,21 @@ const rootBase = ".root"
 // baseForRoot loads in the cache the root document and produces a fake ".root" base path entry
 // for further $ref resolution
-//
-// Setting the cache is optional and this parameter may safely be left to nil.
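The embed.go file added above replaces the generated go-bindata tables with compile-time embedding. As a rough, standalone sketch of the difference in calling convention (names and layout are hypothetical, assuming a schemas/ tree like the one the package embeds):

package main

import (
	"embed"
	"fmt"
	"path"
)

// The embed patterns are verified at build time: if no file matches,
// compilation fails, whereas a bad name handed to go-bindata's Asset()
// only surfaced as a run-time error.
//
//go:embed schemas/*.json schemas/*/*.json
var assets embed.FS

func main() {
	// Old style: b, err := Asset("v2/schema.json") - a run-time map lookup.
	// New style: read straight from the embedded filesystem.
	b, err := assets.ReadFile(path.Join("schemas", "v2", "schema.json"))
	if err != nil {
		fmt.Println("schema not embedded:", err)
		return
	}
	fmt.Printf("v2 schema: %d bytes\n", len(b))
}

One design note: go-bindata shipped the payload gzip-compressed alongside a generated decoder, while embed.FS stores the bytes verbatim; the binary grows slightly, but the entire generated file, its digest bookkeeping, and the restore helpers disappear.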
 func baseForRoot(root interface{}, cache ResolutionCache) string {
+	// cache the root document to resolve $ref's
+	normalizedBase := normalizeBase(rootBase)
+
 	if root == nil {
-		return ""
+		// ensure that we never leave a nil root: always cache the root base pseudo-document
+		cachedRoot, found := cache.Get(normalizedBase)
+		if found && cachedRoot != nil {
+			// the cache is already preloaded with a root
+			return normalizedBase
+		}
+
+		root = map[string]interface{}{}
 	}
 
-	// cache the root document to resolve $ref's
-	normalizedBase := normalizeBase(rootBase)
 	cache.Set(normalizedBase, root)
 
 	return normalizedBase
diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
index 2df07231..f19f1a8f 100644
--- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
+++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
@@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) {
 	return u, ""
 }
 
-func fixWindowsURI(u *url.URL, in string) {
+func fixWindowsURI(_ *url.URL, _ string) {
 }
diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go
index 995ce6ac..a69cca88 100644
--- a/vendor/github.com/go-openapi/spec/operation.go
+++ b/vendor/github.com/go-openapi/spec/operation.go
@@ -217,9 +217,12 @@ func (o *Operation) AddParam(param *Parameter) *Operation {
 
 	for i, p := range o.Parameters {
 		if p.Name == param.Name && p.In == param.In {
-			params := append(o.Parameters[:i], *param)
+			params := make([]Parameter, 0, len(o.Parameters)+1)
+			params = append(params, o.Parameters[:i]...)
+			params = append(params, *param)
 			params = append(params, o.Parameters[i+1:]...)
 			o.Parameters = params
+			return o
 		}
 	}
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
index 2b2b89b6..bd4f1cdb 100644
--- a/vendor/github.com/go-openapi/spec/parameter.go
+++ b/vendor/github.com/go-openapi/spec/parameter.go
@@ -84,27 +84,27 @@ type ParamProps struct {
 // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
 //
 // There are five possible parameter types.
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
-//   of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
-//   the path parameter is `itemId`.
-// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
-// * Header - Custom headers that are expected as part of the request.
-// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
-//   _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
-//   documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
-//   together for the same operation.
-// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
-//   `multipart/form-data` are used as the content type of the request (in Swagger's definition,
-//   the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
-//   to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
-//   declared together with a body parameter for the same operation. Form parameters have a different format based on
-//   the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
-//   * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
-//   For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
-//   parameters that are being transferred.
-//   * `multipart/form-data` - each parameter takes a section in the payload with an internal header.
-//   For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
-//   `submit-name`. This type of form parameters is more commonly used for file transfers.
+//   - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
+//     of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
+//     the path parameter is `itemId`.
+//   - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+//   - Header - Custom headers that are expected as part of the request.
+//   - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
+//     _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
+//     documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
+//     together for the same operation.
+//   - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
+//     `multipart/form-data` are used as the content type of the request (in Swagger's definition,
+//     the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
+//     to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
+//     declared together with a body parameter for the same operation. Form parameters have a different format based on
+//     the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
+//   - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
+//     For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
+//     parameters that are being transferred.
+//   - `multipart/form-data` - each parameter takes a section in the payload with an internal header.
+//     For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
+//     `submit-name`. This type of form parameters is more commonly used for file transfers.
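The operation.go hunk above fixes a classic slice-aliasing hazard: `append(o.Parameters[:i], *param)` may write *param directly into the backing array shared with any caller holding a reference to the original slice, and the loop previously kept iterating over the mutated slice (hence the added `return o`). A self-contained sketch of the pitfall, with illustrative names rather than the library's:

package main

import "fmt"

type Parameter struct {
	Name string
	In   string
}

func main() {
	params := []Parameter{{"a", "query"}, {"b", "query"}, {"c", "query"}}
	replacement := Parameter{"B", "query"}
	i := 1

	// Buggy variant: append writes into params' backing array, so the
	// element at index i is overwritten in place - every alias of the
	// original slice observes the mutation.
	buggy := append(params[:i], replacement)
	buggy = append(buggy, params[i+1:]...)
	fmt.Println("buggy result:        ", buggy)
	fmt.Println("shared backing array:", params) // params[1] is now {B query}

	// Fixed variant (as in the patch): build a fresh slice with its own
	// backing array, leaving existing aliases untouched.
	params2 := []Parameter{{"a", "query"}, {"b", "query"}, {"c", "query"}}
	fixed := make([]Parameter, 0, len(params2)+1)
	fixed = append(fixed, params2[:i]...)
	fixed = append(fixed, replacement)
	fixed = append(fixed, params2[i+1:]...)
	fmt.Println("untouched original:  ", params2)
	fmt.Println("replaced copy:       ", fixed)
}

Building a fresh slice with make costs one allocation per replacement, but it guarantees that no other holder of the original slice sees the element swapped out from under it.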
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index b81175af..0059b99a 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -168,14 +168,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) - unescaped, err := url.PathUnescape(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - u := url.URL{Path: unescaped} - - data, fromCache := r.cache.Get(u.RequestURI()) + data, fromCache := r.cache.Get(normalized) if fromCache { return data, toFetch, fromCache, nil } diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 00000000..bcbb8474 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + 
"additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 00000000..ebe10ed3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." 
+ }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 7d38b6e6..876aa127 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -26,7 +26,7 @@ import ( const ( // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs 
SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema + // JSONSchemaURL the url for the json schema JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) @@ -41,7 +41,7 @@ func MustLoadJSONSchemaDraft04() *Schema { // JSONSchemaDraft04 loads the json schema document for json shema draft04 func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") + b, err := jsonschemaDraft04JSONBytes() if err != nil { return nil, err } @@ -65,7 +65,7 @@ func MustLoadSwagger20Schema() *Schema { // Swagger20Schema loads the swagger 2.0 schema from the embedded assets func Swagger20Schema() (*Schema, error) { - b, err := Asset("v2/schema.json") + b, err := v2SchemaJSONBytes() if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 44722ffd..1590fd17 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool - if len(data) >= 4 { + if len(data) > 0 { if data[0] == '{' { var sch Schema if err := json.Unmarshal(data, &sch); err != nil { @@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } nw.Schema = &sch } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Allows = !bytes.Equal(data, []byte("false")) } *s = nw return nil diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go deleted file mode 100644 index 60b78515..00000000 --- a/vendor/github.com/go-openapi/spec/url_go18.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !go1.19 -// +build !go1.19 - -package spec - -import "net/url" - -var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go index 392e3e63..5bdfe40b 100644 --- a/vendor/github.com/go-openapi/spec/url_go19.go +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -1,6 +1,3 @@ -//go:build go1.19 -// +build go1.19 - package spec import "net/url" diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index be4899cb..22f8d21c 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -4,56 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 31 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: - enable: - - revive - - goimports - - gosec + enable-all: true + disable: + - maligned - unparam - - unconvert - - predeclared - - prealloc - - misspell - - # disable: - # - maligned - # - lll - # - gochecknoinits - # - gochecknoglobals - # - godox - # - gocognit - # - whitespace - # - wsl - # - funlen - # - wrapcheck - # - testpackage - # - nlreturn - # - gofumpt - # - goerr113 - # - gci - # - gomnd - # - godot - # - exhaustivestruct - # - paralleltest - # - varnamelen - # - ireturn - # - exhaustruct - # #- thelper - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - - stylecheck - + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - 
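On the SchemaOrBool.UnmarshalJSON fix above: the old guard len(data) >= 4 skipped payloads shorter than four bytes (so an empty schema `{}` was never parsed), and the byte-by-byte comparison could read data[4] on a four-byte token. A minimal sketch of the new behaviour, using a simplified stand-in type rather than the real spec.SchemaOrBool:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// schemaOrBool is a simplified stand-in for spec.SchemaOrBool, only to
// illustrate the bounds fix; the real type unmarshals into a *spec.Schema.
type schemaOrBool struct {
	Allows    bool
	HasSchema bool
}

func (s *schemaOrBool) UnmarshalJSON(data []byte) error {
	var nw schemaOrBool
	if len(data) > 0 { // old guard was len(data) >= 4, which skipped "{}"
		if data[0] == '{' {
			nw.HasSchema = true // the real code decodes a spec.Schema here
		}
		// comparing the whole token avoids indexing past a 4-byte payload
		nw.Allows = !bytes.Equal(data, []byte("false"))
	}
	*s = nw
	return nil
}

func main() {
	for _, in := range []string{`true`, `false`, `{}`} {
		var s schemaOrBool
		if err := json.Unmarshal([]byte(in), &s); err != nil {
			fmt.Println(in, "=>", err)
			continue
		}
		fmt.Printf("%s => allows=%v hasSchema=%v\n", in, s.Allows, s.HasSchema)
	}
}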
gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md index 0cf89d77..f6b39c6c 100644 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -1,8 +1,7 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - +# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) This package exposes a registry of data types to support string formats in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index a8a3604a..cfa9a526 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool { // ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) // // swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive +type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck // NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { //nolint:revive +func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck oid, err := bsonprim.ObjectIDFromHex(hex) if err != nil { panic(err) @@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error { // BSON document if the error is nil. 
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil + return bson.TypeObjectID, oid[:], nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go index a89a4de3..28137140 100644 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/asaskevich/govalidator" + "github.com/google/uuid" "go.mongodb.org/mongo-driver/bson" ) @@ -57,24 +58,35 @@ const ( // - long top-level domain names (e.g. example.london) are permitted // - symbol unicode points are permitted (e.g. emoji) (not for top-level domain) HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$` + + // json null type + jsonNull = "null" +) + +const ( // UUIDPattern Regex for UUID that allows uppercase - UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` + // UUID3Pattern Regex for UUID3 that allows uppercase - UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` + // UUID4Pattern Regex for UUID4 that allows uppercase - UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + // UUID5Pattern Regex for UUID5 that allows uppercase - UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` - // json null type - jsonNull = "null" + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. 
+ UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` ) var ( rxHostname = regexp.MustCompile(HostnamePattern) - rxUUID = regexp.MustCompile(UUIDPattern) - rxUUID3 = regexp.MustCompile(UUID3Pattern) - rxUUID4 = regexp.MustCompile(UUID4Pattern) - rxUUID5 = regexp.MustCompile(UUID5Pattern) ) // IsHostname returns true when the string is a valid hostname @@ -99,24 +111,28 @@ func IsHostname(str string) bool { return valid } -// IsUUID returns true is the string matches a UUID, upper case is allowed +// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed func IsUUID(str string) bool { - return rxUUID.MatchString(str) + _, err := uuid.Parse(str) + return err == nil } -// IsUUID3 returns true is the string matches a UUID, upper case is allowed +// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed func IsUUID3(str string) bool { - return rxUUID3.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(3) } -// IsUUID4 returns true is the string matches a UUID, upper case is allowed +// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed func IsUUID4(str string) bool { - return rxUUID4.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(4) } -// IsUUID5 returns true is the string matches a UUID, upper case is allowed +// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed func IsUUID5(str string) bool { - return rxUUID5.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(5) } // IsEmail validates an email address. diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index ad3b3c35..ce99ce52 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -94,7 +94,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { } // MapStructureHookFunc is a decode hook function for mapstructure -func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop +func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) { if from.Kind() != reflect.String { return obj, nil diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 9bef4c3b..f08ba4da 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -76,6 +76,8 @@ const ( ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. 
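On the strfmt UUID changes above: with the regexes deprecated, the checks now delegate to github.com/google/uuid and inspect the version nibble. A small sketch (not part of the patch) of what the new version-aware checks accept, with illustrative input values:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// isUUID4 mirrors the new strfmt behaviour: parse first, then check the
// version, instead of matching a regular expression.
func isUUID4(s string) bool {
	id, err := uuid.Parse(s)
	return err == nil && id.Version() == uuid.Version(4)
}

func main() {
	fmt.Println(isUUID4("F47AC10B-58CC-4372-A567-0E02B2C3D479")) // true: upper case allowed
	fmt.Println(isUUID4("f47ac10b58cc4372a5670e02b2c3d479"))     // true: uuid.Parse accepts the 32-char form
	fmt.Println(isUUID4("f47ac10b-58cc-1372-a567-0e02b2c3d479")) // false: version 1, not 4
}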
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" + // short form of ISO8601TimeUniversalSortableDateTimePattern + ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) @@ -84,7 +86,7 @@ var ( rxDateTime = regexp.MustCompile(DateTimePattern) // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern} + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) MarshalFormat = RFC3339Millis @@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(i64)) - return bsontype.DateTime, buf, nil + return bson.TypeDateTime, buf, nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a @@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
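On the DateTimeFormats change above: appending the short form means ParseDateTime now accepts a bare date as well, since the formats are tried in order. A quick sketch against the vendored strfmt (input values are illustrative):

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// The last entry exercises the new "2006-01-02" short form.
	for _, in := range []string{"2006-01-02T15:04:05Z", "2006-01-02 15:04:05", "2006-01-02"} {
		dt, err := strfmt.ParseDateTime(in)
		fmt.Println(in, "=>", dt, err)
	}
}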
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - if tpe == bsontype.Null { + if tpe == bson.TypeNull { *t = DateTime{} return nil } diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index d69b53ac..c4b1b64f 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e40..80e2be00 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 217f6fa5..a7292229 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. 
This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/initialism_index.go similarity index 98% rename from vendor/github.com/go-openapi/swag/post_go19.go rename to vendor/github.com/go-openapi/swag/initialism_index.go index 7c7da9c0..03555184 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.9 -// +build go1.9 - package swag import ( diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 00038c37..783442fd 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. +// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. 
+// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... ==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go deleted file mode 100644 index f5228b82..00000000 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
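On the LoadStrategy rewrite above: the behavioural changes are mostly about windows file:// URIs; on unix the prefix is still just stripped before the local loader runs. A minimal sketch of the call shape, where spec.yaml is a hypothetical local file and the remote loader is stubbed out:

package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	// LoadStrategy returns the remote loader for any path starting with
	// "http"; everything else goes through the local loader.
	load := swag.LoadStrategy("./spec.yaml", os.ReadFile, func(string) ([]byte, error) {
		return nil, fmt.Errorf("remote loading stubbed out in this sketch")
	})
	// On unix, "file://./spec.yaml" is unescaped, stripped to "./spec.yaml",
	// and handed to os.ReadFile.
	b, err := load("file://./spec.yaml")
	fmt.Println(len(b), err)
}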
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.8 -// +build go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go deleted file mode 100644 index 2757d9b9..00000000 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.8 -// +build !go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db37..00000000 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. 
-type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index d971fbe3..0413f744 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -174,7 +174,7 @@ func (s byInitialism) Less(i, j int) bool { // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -231,7 +231,7 @@ func ToHumanNameLower(name string) string { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } @@ -244,7 +244,7 @@ func ToHumanNameTitle(name string) string { out := make([]string, 0, len(in)) for _, w := range in { - original := w.GetOriginal() + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { @@ -264,7 +264,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -343,7 +343,7 @@ type zeroable interface { func IsZero(data interface{}) bool { v := reflect.ValueOf(data) // check for nil data - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: if v.IsNil() { return true @@ -356,7 +356,7 @@ func IsZero(data interface{}) bool { } // continue with slightly more complex reflection - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f09ee609..a8c4e359 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -18,6 +18,8 @@ import ( "encoding/json" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -147,7 +149,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +247,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, 
reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +287,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +347,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml index 81818ca6..22f8d21c 100644 --- a/vendor/github.com/go-openapi/validate/.golangci.yml +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -1,12 +1,14 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 50 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 min-occurrences: 3 @@ -15,36 +17,45 @@ linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits + - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - tparallel + - gofumpt - paralleltest - - cyclop # because we have gocyclo already - # TODO: review the linters below. We disabled them to make the CI pass first. - - ireturn + - tparallel + - thelper + - ifshort + - exhaustruct - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn - forcetypeassert - - thelper - # Disable deprecated linters. - # They will be removed from golangci-lint in future. 
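Stepping back to the swag yaml.go changes above: sorting the map keys makes json2yaml's output deterministic across runs (Go randomizes map iteration order), and the new isNil guard emits an explicit null scalar instead of returning (nil, nil). The ordering trick in isolation:

package main

import (
	"fmt"
	"sort"
)

// sortedKeys mirrors the pattern used in json2yaml: collect the keys,
// sort them, then visit the values in that fixed order.
func sortedKeys(m map[string]interface{}) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	m := map[string]interface{}{"b": 2, "a": 1, "c": nil}
	for _, k := range sortedKeys(m) {
		fmt.Println(k, "=>", m[k]) // always a, b, c
	}
}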
+ - cyclop + # deprecated linters + - deadcode - interfacer - - golint \ No newline at end of file + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md index ea2d68cb..e8e1bb21 100644 --- a/vendor/github.com/go-openapi/validate/README.md +++ b/vendor/github.com/go-openapi/validate/README.md @@ -1,7 +1,5 @@ -# Validation helpers -[![Build Status](https://travis-ci.org/go-openapi/validate.svg?branch=master)](https://travis-ci.org/go-openapi/validate) -[![Build status](https://ci.appveyor.com/api/projects/status/d6epy6vipueyh5fs/branch/master?svg=true)](https://ci.appveyor.com/project/fredbi/validate/branch/master) -[![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) +# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) + [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate) @@ -24,7 +22,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m * Minimum, Maximum, MultipleOf * FormatOf -[Documentation](https://godoc.org/github.com/go-openapi/validate) +[Documentation](https://pkg.go.dev/github.com/go-openapi/validate) ## FAQ diff --git a/vendor/github.com/go-openapi/validate/appveyor.yml b/vendor/github.com/go-openapi/validate/appveyor.yml deleted file mode 100644 index 89e5bccb..00000000 --- a/vendor/github.com/go-openapi/validate/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\validate -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m -args -enable-long ./... 
- -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index bd14c2a2..7f7fd0be 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -170,7 +170,6 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl if response.Headers != nil { // Safeguard for nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration @@ -262,7 +261,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } // TODO: Temporary duplicated code. Need to refactor with examples -// nolint: dupl + func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { res := new(Result) s := d.SpecValidator diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go index f5ca9a5d..d2b901ea 100644 --- a/vendor/github.com/go-openapi/validate/doc.go +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -19,7 +19,7 @@ as well as tools to validate data against their schema. This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. -Validating a specification +# Validating a specification Validates a spec document (from JSON or YAML) against the JSON schema for swagger, then checks a number of extra rules that can't be expressed in JSON schema. 
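The doc.go comment above describes Spec() as the one-shot entry point for spec validation; in client code that looks roughly like this sketch (swagger.json is a placeholder path):

package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	doc, err := loads.Spec("swagger.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// validate.Spec flattens all validation messages into a single error.
	if err := validate.Spec(doc, strfmt.Default); err != nil {
		log.Fatal(err)
	}
	log.Println("spec is valid")
}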
@@ -30,34 +30,36 @@ Entry points: - SpecValidator.Validate() Reported as errors: - [x] definition can't declare a property that's already defined by one of its ancestors - [x] definition's ancestor can't be a descendant of the same model - [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method - [x] each security reference should contain only unique scopes - [x] each security scope in a security definition should be unique - [x] parameters in path must be unique - [x] each path parameter must correspond to a parameter placeholder and vice versa - [x] each referenceable definition must have references - [x] each definition property listed in the required array must be defined in the properties of the model - [x] each parameter should have a unique `name` and `type` combination - [x] each operation should have only 1 parameter of type body - [x] each reference must point to a valid object - [x] every default value that is specified must validate against the schema for that property - [x] items property is required for all schemas/definitions of type `array` - [x] path parameters must be declared a required - [x] headers must not contain $ref - [x] schema and property examples provided must validate against their respective object's schema - [x] examples provided must validate their schema + + [x] definition can't declare a property that's already defined by one of its ancestors + [x] definition's ancestor can't be a descendant of the same model + [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. + [x] each security reference should contain only unique scopes + [x] each security scope in a security definition should be unique + [x] parameters in path must be unique + [x] each path parameter must correspond to a parameter placeholder and vice versa + [x] each referenceable definition must have references + [x] each definition property listed in the required array must be defined in the properties of the model + [x] each parameter should have a unique `name` and `type` combination + [x] each operation should have only 1 parameter of type body + [x] each reference must point to a valid object + [x] every default value that is specified must validate against the schema for that property + [x] items property is required for all schemas/definitions of type `array` + [x] path parameters must be declared a required + [x] headers must not contain $ref + [x] schema and property examples provided must validate against their respective object's schema + [x] examples provided must validate their schema Reported as warnings: - [x] path parameters should not contain any of [{,},\w] - [x] empty path - [x] unused definitions - [x] unsupported validation of examples on non-JSON media types - [x] examples in response without schema - [x] readOnly properties should not be required -Validating a schema + [x] path parameters should not contain any of [{,},\w] + [x] empty path + [x] unused definitions + [x] unsupported validation of examples on non-JSON media types + [x] examples in response without schema + [x] readOnly properties should not be required + +# Validating a schema The schema validation toolkit validates data against JSON-schema-draft 04 schema. @@ -70,16 +72,16 @@ Entry points: - AgainstSchema() - ... 
-Known limitations +# Known limitations With the current version of this package, the following aspects of swagger are not yet supported: - [ ] errors and warnings are not reported with key/line number in spec - [ ] default values and examples on responses only support application/json producer type - [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values - [ ] rules for collectionFormat are not implemented - [ ] no validation rule for polymorphism support (discriminator) [not done here] - [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid - [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 + [ ] errors and warnings are not reported with key/line number in spec + [ ] default values and examples on responses only support application/json producer type + [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values + [ ] rules for collectionFormat are not implemented + [ ] no validation rule for polymorphism support (discriminator) [not done here] + [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid + [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 */ package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index c8bffd78..d5d4b883 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -48,7 +48,6 @@ func (ex *exampleValidator) isVisited(path string) bool { // - schemas // - individual property // - responses -// func (ex *exampleValidator) Validate() (errs *Result) { errs = new(Result) if ex == nil || ex.SpecValidator == nil { @@ -146,7 +145,6 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl if response.Headers != nil { // Safeguard for nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration @@ -251,7 +249,8 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } // TODO: Temporary duplicated code. Need to refactor with examples -// nolint: dupl +// + func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { res := new(Result) s := ex.SpecValidator diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 48ebfab5..fc7500c8 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -157,7 +157,7 @@ func (h *valueHelper) asInt64(val interface{}) int64 { // Number conversion function for int64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -174,7 +174,7 @@ func (h *valueHelper) asUint64(val interface{}) uint64 { // Number conversion function for uint64, without error checking // (implements an implicit type upgrade). 
v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return uint64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -192,7 +192,7 @@ func (h *valueHelper) asFloat64(val interface{}) float64 { // Number conversion function for float64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -250,7 +250,7 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec } if err != nil { // Safeguard - // NOTE: we may enter enter here when the whole parameter is an unresolved $ref + // NOTE: we may enter here when the whole parameter is an unresolved $ref refPath := strings.Join([]string{"\"" + path + "\"", method}, ".") errorHelp.addPointerError(res, err, param.Ref.String(), refPath) return nil, res diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index deeec2f2..8a22ce99 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -21,10 +21,28 @@ import "sync" // NOTE: other options might be needed, for example a go-swagger specific mode. type Opts struct { ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid + + // StrictPathParamUniqueness enables a strict validation of paths that include + // path parameters. When true, it will enforce that for each method, the path + // is unique, regardless of path parameters such that GET:/petstore/{id} and + // GET:/petstore/{pet} anre considered duplicate paths. + // + // Consider disabling if path parameters can include slashes such as + // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and + // /"shelve/*/book/*" respectively. + StrictPathParamUniqueness bool } var ( - defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors + defaultOpts = Opts{ + // default is to stop validation on errors + ContinueOnErrors: false, + + // StrictPathParamUniqueness is defaulted to true. This maintains existing + // behavior. + StrictPathParamUniqueness: true, + } + defaultOptsMutex = &sync.Mutex{} ) diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 8f5f935e..0fe934fe 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ b/vendor/github.com/go-openapi/validate/result.go @@ -132,7 +132,6 @@ func (r *Result) RootObjectSchemata() []*spec.Schema { } // FieldSchemata returns the schemata which apply to fields in objects. -// nolint: dupl func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { if r.cachedFieldSchemta != nil { return r.cachedFieldSchemta @@ -152,7 +151,6 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { } // ItemSchemata returns the schemata which apply to items in slices. -// nolint: dupl func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { if r.cachedItemSchemata != nil { return r.cachedItemSchemata @@ -177,7 +175,8 @@ func (r *Result) resetCaches() { } // mergeForField merges other into r, assigning other's root schemata to the given Object and field name. 
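On the new StrictPathParamUniqueness option above: it defaults to true to preserve existing behaviour, and is meant to be switched off when path parameters can contain slashes. A sketch of opting out through SpecValidator, assuming the API names as they appear in this patch:

package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	doc, err := loads.Spec("swagger.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	v := validate.NewSpecValidator(doc.Schema(), strfmt.Default)
	// Disable strict path uniqueness when path parameters may contain
	// slashes, e.g. GET /v1/{shelve} vs GET /v1/{book}.
	v.Options = validate.Opts{
		ContinueOnErrors:          true,
		StrictPathParamUniqueness: false,
	}
	res, _ := v.Validate(doc) // second result carries warnings
	if res.HasErrors() {
		log.Fatal(res.AsError())
	}
}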
-// nolint: unparam +// +//nolint:unparam func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result { if other == nil { return r @@ -199,7 +198,8 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other * } // mergeForSlice merges other into r, assigning other's root schemata to the given slice and index. -// nolint: unparam +// +//nolint:unparam func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result { if other == nil { return r diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index b817eb0e..62b91dc5 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -101,7 +101,7 @@ func (s *SchemaValidator) SetPath(path string) { } // Applies returns true when this schema validator applies -func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool { _, ok := source.(*spec.Schema) return ok } diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index dff01f00..30168b71 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -26,23 +26,23 @@ import ( "github.com/go-openapi/loads" "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // Spec validates an OpenAPI 2.0 specification document. // // Returns an error flattening in a single standard error, all validation messages. // -// - TODO: $ref should not have siblings -// - TODO: make sure documentation reflects all checks and warnings -// - TODO: check on discriminators -// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) -// - TODO: full list of unresolved refs -// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples -// - TODO: option to determine if we validate for go-swagger or in a more general context -// - TODO: check on required properties to support anyOf, allOf, oneOf +// - TODO: $ref should not have siblings +// - TODO: make sure documentation reflects all checks and warnings +// - TODO: check on discriminators +// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) +// - TODO: full list of unresolved refs +// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - TODO: option to determine if we validate for go-swagger or in a more general context +// - TODO: check on required properties to support anyOf, allOf, oneOf // // NOTE: SecurityScopes are maps: no need to check uniqueness -// func Spec(doc *loads.Document, formats strfmt.Registry) error { errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) if errs.HasErrors() { @@ -456,7 +456,6 @@ func (s *SpecValidator) validateReferenced() *Result { return &res } -// nolint: dupl func (s *SpecValidator) validateReferencedParameters() *Result { // Each referenceable definition should have references. params := s.spec.Spec().Parameters @@ -482,7 +481,6 @@ func (s *SpecValidator) validateReferencedParameters() *Result { return result } -// nolint: dupl func (s *SpecValidator) validateReferencedResponses() *Result { // Each referenceable definition should have references. 
responses := s.spec.Spec().Responses @@ -508,7 +506,6 @@ func (s *SpecValidator) validateReferencedResponses() *Result { return result } -// nolint: dupl func (s *SpecValidator) validateReferencedDefinitions() *Result { // Each referenceable definition must have references. defs := s.spec.Spec().Definitions @@ -615,7 +612,7 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche func (s *SpecValidator) validateParameters() *Result { // - for each method, path is unique, regardless of path parameters // e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are - // considered duplicate paths + // considered duplicate paths, if StrictPathParamUniqueness is enabled. // - each parameter should have a unique `name` and `type` combination // - each operation should have only 1 parameter of type body // - there must be at most 1 parameter in body @@ -627,28 +624,30 @@ func (s *SpecValidator) validateParameters() *Result { for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) for path, op := range pi { - pathToAdd := pathHelp.stripParametersInPath(path) + if s.Options.StrictPathParamUniqueness { + pathToAdd := pathHelp.stripParametersInPath(path) - // Warn on garbled path afer param stripping - if rexGarbledPathSegment.MatchString(pathToAdd) { - res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) - } + // Warn on garbled path afer param stripping + if rexGarbledPathSegment.MatchString(pathToAdd) { + res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) + } - // Check uniqueness of stripped paths - if _, found := methodPaths[method][pathToAdd]; found { + // Check uniqueness of stripped paths + if _, found := methodPaths[method][pathToAdd]; found { - // Sort names for stable, testable output - if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { - res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + // Sort names for stable, testable output + if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { + res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + } else { + res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) + } } else { - res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) - } - } else { - if _, found := methodPaths[method]; !found { - methodPaths[method] = map[string]string{} - } - methodPaths[method][pathToAdd] = path // Original non stripped path + if _, found := methodPaths[method]; !found { + methodPaths[method] = map[string]string{} + } + methodPaths[method][pathToAdd] = path // Original non stripped path + } } var bodyParams []string @@ -659,7 +658,18 @@ func (s *SpecValidator) validateParameters() *Result { // TODO: should be done after param expansion res.Merge(s.checkUniqueParams(path, method, op)) + paramSchema, ok := s.schema.Definitions["parameter"] + if !ok { + panic("unexpected swagger schema: missing #/definitions/parameter") + } + for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + // An expanded parameter must validate its schema (an unexpanded $ref always pass high-level schema validation) + schv := NewSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, SwaggerSchema(true)) + obj := swag.ToDynamicJSON(pr) + paramValidationResult := schv.Validate(obj) + res.Merge(paramValidationResult) + // Validate pattern regexp for parameters with a Pattern property if _, err := compileRegexp(pr.Pattern); err != nil { 
res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern)) diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index b3757add..5398679b 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -349,9 +349,10 @@ func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { } // disabled -// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { -// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) -// } +// +// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +// } func someParametersBrokenMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) } diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index 87646758..c7abf380 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -90,7 +90,7 @@ func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { default: val := reflect.ValueOf(data) tpe := val.Type() - switch tpe.Kind() { + switch tpe.Kind() { //nolint:exhaustive case reflect.Bool: return booleanType, "" case reflect.String: diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index 38cdb9bb..ab4f718b 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -148,7 +148,7 @@ func (b *basicCommonValidator) SetPath(path string) { b.Path = path } -func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Header: return true @@ -548,7 +548,6 @@ func (n *numberValidator) Validate(val interface{}) *Result { } } - // nolint: dupl if n.Maximum != nil { // Is the constraint specifier within the range of the specific numeric type and format? resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) @@ -565,7 +564,6 @@ func (n *numberValidator) Validate(val interface{}) *Result { } } - // nolint: dupl if n.Minimum != nil { // Is the constraint specifier within the range of the specific numeric type and format? 
resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go index e7ad8c10..5f6f5ee6 100644 --- a/vendor/github.com/go-openapi/validate/values.go +++ b/vendor/github.com/go-openapi/validate/values.go @@ -120,7 +120,7 @@ func UniqueItems(path, in string, data interface{}) *errors.Validation { // MinLength validates a string for minimum length func MinLength(path, in, data string, minLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen < minLength { return errors.TooShort(path, in, minLength, data) } @@ -129,7 +129,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation { // MaxLength validates a string for maximum length func MaxLength(path, in, data string, maxLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen > maxLength { return errors.TooLong(path, in, maxLength, data) } @@ -315,7 +315,7 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MaximumInt(path, in, value, int64(max), exclusive) @@ -345,7 +345,7 @@ func MaximumNativeType(path, in string, val interface{}, max float64, exclusive // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MinimumInt(path, in, value, int64(min), exclusive) @@ -375,7 +375,7 @@ func MinimumNativeType(path, in string, val interface{}, min float64, exclusive // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MultipleOfInt(path, in, value, int64(multipleOf)) @@ -399,7 +399,7 @@ func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path st // What is the string representation of val var stringRep string - switch kind { + switch kind { //nolint:exhaustive case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: stringRep = swag.FormatUint64(valueHelp.asUint64(val)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 7ed347d3..c9fb829d 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## 
[1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cd..c3511292 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index dc75f7d9..5232b486 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. 
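The new `Validate` accepts the same four textual forms shown in its doc comment but reports format errors without allocating a `UUID`. A brief usage sketch (the sample hex strings are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The four accepted textual forms, plus one invalid input.
	inputs := []string{
		"f47ac10b-58cc-0372-8567-0e02b2c3d479",          // canonical
		"urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479", // URN prefixed
		"{f47ac10b-58cc-0372-8567-0e02b2c3d479}",        // braced
		"f47ac10b58cc037285670e02b2c3d479",              // hyphen-less
		"not-a-uuid",
	}
	for _, s := range inputs {
		fmt.Printf("%-50s valid=%v\n", s, uuid.Validate(s) == nil)
	}
}
```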
func (uuid UUID) String() string { diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 00000000..339a959a --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current time, NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 00000000..ba9dd5eb --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,75 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// It also has improved entropy characteristics over versions 1 and 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool.
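A quick usage sketch for the two constructors added here; assuming a google/uuid version that includes these files, both are plain drop-in calls (printed values vary per run):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// v6: v1-compatible fields, reordered so values sort by creation time.
	u6, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}
	fmt.Println(u6, u6.Version()) // version 6

	// v7: Unix-millisecond timestamp in the top 48 bits, random bits below.
	u7, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	fmt.Println(u7, u7.Time()) // Time() is defined for v6/v7 per the updated docs
}
```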
+// On error, NewV7 returns Nil and an error. +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch). +// It uses NewRandomFromReader to fill the random bits. +// On error, NewV7FromReader returns Nil and an error. +func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets version b0111 (uuid[6]). +// uuid[8] already has the right variant bits set (10). +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t := timeNow().UnixMilli() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (uuid[6] & 0x0F) + // uuid[8] already has the right variant +} diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig new file mode 100644 index 00000000..c6b74c3e --- /dev/null +++ b/vendor/github.com/gorilla/mux/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore new file mode 100644 index 00000000..84039fec --- /dev/null +++ b/vendor/github.com/gorilla/mux/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392e..00000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. - -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 6903df63..bb9d80bc 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile new file mode 100644 index 00000000..98f5ab75 --- /dev/null +++ b/vendor/github.com/gorilla/mux/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 35eea9f1..382513d5 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,12 +1,12 @@ # gorilla/mux -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) +![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) +[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) -https://www.gorillatoolkit.org/pkg/mux +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to their respective handler. @@ -247,32 +247,25 @@ type spaHandler struct { // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). 
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) + // Join internally call path.Clean to prevent directory traversal + path := filepath.Join(h.staticPath, r.URL.Path) - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html + // check whether a file exists or is a directory at the given path + fi, err := os.Stat(path) + if os.IsNotExist(err) || fi.IsDir() { + // file does not exist or path is a directory, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop + } + + if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) - return + return } - // otherwise, use http.FileServer to serve the static dir + // otherwise, use http.FileServer to serve the static file http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } @@ -375,6 +368,19 @@ url, err := r.Get("article").URL("subdomain", "news", "id", "42") ``` +To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: +```go +r := mux.NewRouter() +r.Host("{domain}"). + Path("/{group}/{item_id}"). + Queries("some_data1", "{some_data1}"). + Queries("some_data2", "{some_data2}"). + Name("article") + +// Will print [domain group item_id some_data1 some_data2] +fmt.Println(r.Get("article").GetVarNames()) + +``` ### Walking Routes The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, @@ -572,7 +578,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler r := mux.NewRouter() r.HandleFunc("/", handler) -amw := authenticationMiddleware{} +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} amw.Populate() r.Use(amw.Middleware) @@ -758,7 +764,8 @@ func TestMetricsHandler(t *testing.T) { rr := httptest.NewRecorder() - // Need to create a router that we can pass the request through so that the vars will be added to the context + // To add the vars to the context, + // we need to create a router through which we can pass the request. router := mux.NewRouter() router.HandleFunc("/metrics/{type}", MetricsHandler) router.ServeHTTP(rr, req) diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index bd5a38b5..80601351 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. 
- * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. + - Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + - URL hosts, paths and query values can have variables with an optional + regular expression. + - Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + - Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + - It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. Let's start registering a couple of URL paths and handlers: @@ -301,6 +301,5 @@ A more complex authentication middleware, which maps session token to users, cou r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 782a34b2..1e089906 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -31,24 +31,26 @@ func NewRouter() *Router { // It implements the http.Handler interface, so it can be registered to serve // requests: // -// var router = mux.NewRouter() +// var router = mux.NewRouter() // -// func main() { -// http.Handle("/", router) -// } +// func main() { +// http.Handle("/", router) +// } // // Or, for Google App Engine, register it in a init() function: // -// func init() { -// http.Handle("/", router) -// } +// func init() { +// http.Handle("/", router) +// } // // This will send all incoming requests to the router. type Router struct { // Configurable Handler to be used when no route matches. + // This can be used to render your own 404 Not Found errors. NotFoundHandler http.Handler // Configurable Handler to be used when the request method does not match the route. + // This can be used to render your own 405 Method Not Allowed errors. MethodNotAllowedHandler http.Handler // Routes to be matched, in order. diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 0144842b..5d05cfa0 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -22,10 +22,10 @@ type routeRegexpOptions struct { type regexpType int const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 + regexpTypePath regexpType = iota + regexpTypeHost + regexpTypePrefix + regexpTypeQuery ) // newRouteRegexp parses a route template and returns a routeRegexp, @@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { // url builds a URL part using the given values. 
func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 750afe57..e8f11df2 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -64,8 +64,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { match.MatchErr = nil } - matchErr = nil + matchErr = nil // nolint:ineffassign return false + } else { + // Multiple routes may share the same path but use different HTTP methods. For instance: + // Route 1: POST "/users/{id}". + // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". + // + // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", + // The router should return a "Not Found" error as no route fully matches this request. + if match.MatchErr == ErrMethodMismatch { + match.MatchErr = nil + } } } @@ -230,9 +240,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // Headers adds a matcher for request header values. // It accepts a sequence of key/value pairs to be matched. For example: // -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. // If the value is an empty string, it will match any value if the key is set. @@ -255,9 +265,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { // HeadersRegexp accepts a sequence of key/value pairs, where the value has regex // support. For example: // -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both the request header matches both regular expressions. // If the value is an empty string, it will match any value if the key is set. @@ -283,10 +293,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") +// r := mux.NewRouter().NewRoute() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -342,11 +352,11 @@ func (r *Route) Methods(methods ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) +// r := mux.NewRouter().NewRoute() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). 
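The `Route.Match` change above matters when several routes share a path template but differ in HTTP method and variable patterns. A hedged sketch of the scenario described in the new comment; with this fix the request below is expected to yield 404 rather than 405:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	ok := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }
	r.HandleFunc("/users/{id}", ok).Methods(http.MethodPost)
	r.HandleFunc("/users/{id:[0-9]+}", ok).Methods(http.MethodGet)

	// GET /users/abc matches neither route fully: the GET route rejects the
	// variable pattern, and the POST route mismatches only on method. With
	// the fix, the stale method-mismatch error is cleared, so the router
	// reports Not Found instead of Method Not Allowed.
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/users/abc", nil))
	fmt.Println(rec.Code) // expected: 404 (was 405 before the fix)
}
```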
@@ -377,8 +387,8 @@ func (r *Route) PathPrefix(tpl string) *Route { // It accepts a sequence of key/value pairs. Values may define variables. // For example: // -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// r := mux.NewRouter().NewRoute() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined queries // values, e.g.: ?foo=bar&id=42. @@ -473,11 +483,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // // It will test the inner routes only if the parent route matched. For example: // -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// r := mux.NewRouter().NewRoute() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) // // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. @@ -497,36 +507,36 @@ func (r *Route) Subrouter() *Router { // It accepts a sequence of key/value pairs for the route variables. For // example, given this route: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") // // ...a URL for it can be built using: // -// url, err := r.Get("article").URL("category", "technology", "id", "42") +// url, err := r.Get("article").URL("category", "technology", "id", "42") // // ...which will return an url.URL with the following path: // -// "/articles/technology/42" +// "/articles/technology/42" // // This also works for host variables: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). +// Name("article") // -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") // // The scheme of the resulting url will be the first argument that was passed to Schemes: // -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() +// // url.String() will be "https://example.com" +// r := mux.NewRouter().NewRoute() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() // // All variables defined in the route are required, and their values must // conform to the corresponding patterns. 
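Because every route variable is required when reversing a URL, the `GetVarNames` accessor added in the next hunk lets callers discover what `URL()` expects. A short sketch combining the two:

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Host("{subdomain}.example.com").
		Path("/articles/{category}/{id:[0-9]+}").
		Name("article")

	// Host variables come first, then path, then query variables.
	names, err := r.Get("article").GetVarNames()
	if err != nil {
		panic(err)
	}
	fmt.Println(names) // [subdomain category id]

	u, err := r.Get("article").URL(
		"subdomain", "news",
		"category", "tech",
		"id", "42",
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // http://news.example.com/articles/tech/42
}
```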
@@ -718,6 +728,25 @@ func (r *Route) GetHostTemplate() (string, error) { return r.regexp.host.template, nil } +// GetVarNames returns the names of all variables added by regexp matchers +// These can be used to know which route variables should be passed into r.URL() +func (r *Route) GetVarNames() ([]string, error) { + if r.err != nil { + return nil, r.err + } + var varNames []string + if r.regexp.host != nil { + varNames = append(varNames, r.regexp.host.varsN...) + } + if r.regexp.path != nil { + varNames = append(varNames, r.regexp.path.varsN...) + } + for _, regx := range r.regexp.queries { + varNames = append(varNames, regx.varsN...) + } + return varNames, nil +} + // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 43de4867..7e83f583 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,14 @@ This package provides various compression algorithms. # changelog +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 65d77735..074018d8 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error { previous0 bool charnum uint16 - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 // Write Table Size bitStream = uint32(tableLog - minTablelog) diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d2..00000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. 
-func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 518436cf..84aa3d12 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Does not update s.clearCount. func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. for _, v := range in { s.count[v]++ } @@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool { // minTableLog provides the minimum logSize to safely represent a distribution. func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 if minBitsSrc < minBitsSymbols { return uint8(minBitsSrc) @@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 { func (s *Scratch) optimalTableLog() { tableLog := s.TableLog minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index e8ad17ad..77ecd68e 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -88,7 +88,7 @@ type Scratch struct { // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. MaxDecodedSize int - br byteReader + srcLen int // MaxSymbolValue will override the maximum symbol value of the next block. MaxSymbolValue uint8 @@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) { if s.fse == nil { s.fse = &fse.Scratch{} } - s.br.init(in) + s.srcLen = len(in) return s, nil } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index bdd49c8b..92e2347b 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ## Decompressor -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
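With the byteReader removed, huff0 sizes its coding tables from `srcLen` alone. A standalone sketch of the sizing arithmetic the updated `minTableLog` performs; `highBit32` is re-derived here via `math/bits` purely for illustration and is not the package's exported API:

```go
package main

import (
	"fmt"
	"math/bits"
)

// highBit32 returns the index of the highest set bit, mirroring huff0's helper.
func highBit32(v uint32) int { return 31 - bits.LeadingZeros32(v) }

// minTableLog mirrors the updated logic: the table log must be large enough
// to cover both the source length and the number of distinct symbols.
func minTableLog(srcLen, symbolLen int) uint8 {
	minBitsSrc := highBit32(uint32(srcLen)) + 1
	minBitsSymbols := highBit32(uint32(symbolLen-1)) + 2
	if minBitsSrc < minBitsSymbols {
		return uint8(minBitsSrc)
	}
	return uint8(minBitsSymbols)
}

func main() {
	// 4 KiB of input with 200 distinct symbols: the symbol count dominates.
	fmt.Println(minTableLog(4096, 200)) // 9
}
```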
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 858f8f43..c81a1535 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) { if m.rep < 0 { ofc = ofCode(uint32(m.s-m.offset) + 3) } else { - ofc = ofCode(uint32(m.rep)) + ofc = ofCode(uint32(m.rep) & 3) } // Cost, excluding ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] @@ -227,7 +227,7 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if rep < 0 { + if true { // Extend candidate match backwards as far as possible. tMin := s - e.maxMatchOff if tMin < 0 { @@ -282,6 +282,7 @@ encodeLoop: // Load next and check... e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 // Look far ahead, unless we have a really long match already... if best.length < goodEnough { @@ -357,19 +358,16 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index old s + 1 -> s - 1 - index0 := s + 1 s = best.s + best.length - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } off := index0 + e.cur - for index0 < s { + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -378,6 +376,7 @@ encodeLoop: off++ index0++ } + switch best.rep { case 2, 4 | 1: offset1, offset2 = offset2, offset1 @@ -386,12 +385,17 @@ encodeLoop: case 4 | 3: offset1, offset2, offset3 = offset1-1, offset1, offset2 } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. 
- index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -419,19 +423,25 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) nextEmit = s - if s >= sLimit { - break encodeLoop + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 } - // Index old s + 1 -> s - 1 - for index0 < s { + off := index0 + e.cur + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ + off++ + } + if s >= sLimit { + break encodeLoop } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 8582f31a..20d25b0e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -145,7 +145,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -162,6 +162,7 @@ encodeLoop: off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -258,7 +259,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -498,15 +498,15 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 + off += 2 } cv = load6432(src, s) @@ -672,7 +672,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -691,6 +691,7 @@ encodeLoop: e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} e.markShortShardDirty(nextHashS) + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -726,7 +727,6 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - index0 := s + repOff s += lenght + repOff nextEmit = s @@ -790,7 +790,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -1024,18 +1023,18 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: 
uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 + off += 2 } cv = load6432(src, s) diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go index 003329c3..59b55a3f 100644 --- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go +++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go @@ -7,7 +7,7 @@ import ( // PolicyAuthority defines the public interface for the Boulder PA // TODO(#5891): Move this interface to a more appropriate location. type PolicyAuthority interface { - WillingToIssueWildcards([]identifier.ACMEIdentifier) error + WillingToIssue([]string) error ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error) ChallengeTypeEnabled(AcmeChallenge) bool CheckAuthz(*Authorization) error diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index d7fe0266..e135dec7 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -25,6 +25,8 @@ import ( "time" "unicode" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/go-jose/go-jose.v2" ) @@ -212,10 +214,83 @@ func IsAnyNilOrZero(vals ...interface{}) bool { switch v := val.(type) { case nil: return true + case bool: + if !v { + return true + } + case string: + if v == "" { + return true + } + case []string: + if len(v) == 0 { + return true + } + case byte: + // Byte is an alias for uint8 and will cover that case. + if v == 0 { + return true + } case []byte: if len(v) == 0 { return true } + case int: + if v == 0 { + return true + } + case int8: + if v == 0 { + return true + } + case int16: + if v == 0 { + return true + } + case int32: + if v == 0 { + return true + } + case int64: + if v == 0 { + return true + } + case uint: + if v == 0 { + return true + } + case uint16: + if v == 0 { + return true + } + case uint32: + if v == 0 { + return true + } + case uint64: + if v == 0 { + return true + } + case float32: + if v == 0 { + return true + } + case float64: + if v == 0 { + return true + } + case time.Time: + if v.IsZero() { + return true + } + case *timestamppb.Timestamp: + if v == nil || v.AsTime().IsZero() { + return true + } + case *durationpb.Duration: + if v == nil || v.AsDuration() == time.Duration(0) { + return true + } default: if reflect.ValueOf(v).IsZero() { return true diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index 5e4e2ff5..57641823 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -21,7 +21,6 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1 #cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT #cgo CFLAGS: -Wno-deprecated-declarations -#cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1 #cgo openbsd CFLAGS: -I/usr/local/include #cgo openbsd LDFLAGS: -L/usr/local/lib #ifndef USE_LIBSQLITE3 @@ -48,6 +47,18 @@ package sqlite3 # define SQLITE_DETERMINISTIC 0 #endif +#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64) +# undef USE_PREAD +# undef USE_PWRITE +# define USE_PREAD64 1 +# define USE_PWRITE64 1 +#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE) +# undef USE_PREAD +# undef USE_PWRITE +# define USE_PREAD64 1 +# define USE_PWRITE64 1 +#endif + static int _sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) { #ifdef SQLITE_OPEN_URI diff --git 
a/vendor/github.com/moby/buildkit/AUTHORS b/vendor/github.com/moby/buildkit/AUTHORS new file mode 100644 index 00000000..c1dce655 --- /dev/null +++ b/vendor/github.com/moby/buildkit/AUTHORS @@ -0,0 +1,66 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/generate-authors.sh`. + +Aaron L. Xu +Aaron Lehmann +Akihiro Suda +Alexander Morozov +Alice Frosi +Allen Sun +Anda Xu +Anthony Sottile +Arnaud Bailly +Bin Liu +Brian Goff +Daniel Nephin +Dave Chen +David Calavera +Dennis Chen +Derek McGowan +Doug Davis +Edgar Lee +Eli Uriegas +f0 +Fernando Miguel +Hao Hu +Helen Xie +Himanshu Pandey +Hiromu Nakamura +Ian Campbell +Iskander (Alex) Sharipov +Jean-Pierre Huynh +Jessica Frazelle +John Howard +Jonathan Stoppani +Justas Brazauskas +Justin Cormack +Kunal Kushwaha +Lajos Papp +Matt Rickard +Michael Crosby +Miyachi Katsuya +Nao YONASHIRO +Natasha Jarus +Noel Georgi <18496730+frezbo@users.noreply.github.com> +Ondrej Fabry +Patrick Van Stee +Ri Xu +Sebastiaan van Stijn +Shev Yan +Simon Ferquel +Stefan Weil +Thomas Leonard +Thomas Shaw +Tibor Vass +Tiffany Jernigan +Tino Rusch +Tobias Klauser +Tomas Tomecek +Tomohiro Kusumoto +Tõnis Tiigi +Vincent Demeester +Wei Fu +Yong Tang +Yuichiro Kaneko +Ziv Tsarfati +郑泽宇 diff --git a/vendor/github.com/moby/buildkit/LICENSE b/vendor/github.com/moby/buildkit/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/moby/buildkit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go new file mode 100644 index 00000000..f23c6874 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands. 
+package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is a list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go new file mode 100644 index 00000000..db1668f2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go @@ -0,0 +1,171 @@ +package parser + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +const ( + keySyntax = "syntax" + keyEscape = "escape" +) + +var validDirectives = map[string]struct{}{ + keySyntax: {}, + keyEscape: {}, +} + +type Directive struct { + Name string + Value string + Location []Range +} + +// DirectiveParser is a parser for Dockerfile directives that enforces their +// quirks: parsing stops at the first non-directive line, and a given +// directive may appear only once. +type DirectiveParser struct { + line int + regexp *regexp.Regexp + seen map[string]struct{} + done bool +} + +func (d *DirectiveParser) setComment(comment string) { + d.regexp = regexp.MustCompile(fmt.Sprintf(`^%s\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`, comment)) +} + +func (d *DirectiveParser) ParseLine(line []byte) (*Directive, error) { + d.line++ + if d.done { + return nil, nil + } + if d.regexp == nil { + d.setComment("#") + } + + match := d.regexp.FindSubmatch(line) + if len(match) == 0 { + d.done = true + return nil, nil + } + + k := strings.ToLower(string(match[1])) + if _, ok := validDirectives[k]; !ok { + d.done = true + return nil, nil + } + if d.seen == nil { + d.seen = map[string]struct{}{} + } + if _, ok := d.seen[k]; ok { + return nil, errors.Errorf("only one %s parser directive can be used", k) + } + d.seen[k] = struct{}{} + + v := string(match[2]) + + directive := Directive{ + Name: k, + Value: v, + Location: []Range{{ + Start: Position{Line: d.line}, + End: Position{Line: d.line}, + }}, + } + return &directive, nil +} + +func (d *DirectiveParser) ParseAll(data []byte) ([]*Directive, error) { + scanner := bufio.NewScanner(bytes.NewReader(data)) + var directives []*Directive + for scanner.Scan() { + if d.done { + break + } + + d, err := d.ParseLine(scanner.Bytes()) + if err != nil { + return directives, err + } + if d != nil { + directives = append(directives, d) + } + } + return directives, nil +} + +// DetectSyntax returns the syntax of the provided input. +// +// The traditional dockerfile directives '# syntax = ...' are used by default; +// however, the function will also fall back to C-style directives '// syntax = ...' +// and JSON-encoded directives '{ "syntax": "..." }'. Finally, lines starting +// with '#!' are treated as shebangs and ignored. +// +// This allows for a flexible range of input formats, and appropriate syntax +// selection.
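+// Example (an illustrative sketch, not from the vendored source): for a +// Dockerfile whose first line is "# syntax=docker/dockerfile:1", DetectSyntax +// should return ("docker/dockerfile:1", "docker/dockerfile:1", the directive's +// location, true); a leading "#!" shebang line is discarded before any +// directives are parsed.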
+func DetectSyntax(dt []byte) (string, string, []Range, bool) { + dt, hadShebang, err := discardShebang(dt) + if err != nil { + return "", "", nil, false + } + line := 0 + if hadShebang { + line++ + } + + // use default directive parser, and search for #syntax= + directiveParser := DirectiveParser{line: line} + if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + return syntax, cmdline, loc, true + } + + // use directive with different comment prefix, and search for //syntax= + directiveParser = DirectiveParser{line: line} + directiveParser.setComment("//") + if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + return syntax, cmdline, loc, true + } + + // search for possible json directives + var directive struct { + Syntax string `json:"syntax"` + } + if err := json.Unmarshal(dt, &directive); err == nil { + if directive.Syntax != "" { + loc := []Range{{ + Start: Position{Line: line}, + End: Position{Line: line}, + }} + return directive.Syntax, directive.Syntax, loc, true + } + } + + return "", "", nil, false +} + +func detectSyntaxFromParser(dt []byte, parser DirectiveParser) (string, string, []Range, bool) { + directives, _ := parser.ParseAll(dt) + for _, d := range directives { + // check for syntax directive before erroring out, since the error + // might have occurred *after* the syntax directive + if d.Name == keySyntax { + p, _, _ := strings.Cut(d.Value, " ") + return p, d.Value, d.Location, true + } + } + return "", "", nil, false +} + +func discardShebang(dt []byte) ([]byte, bool, error) { + line, rest, _ := bytes.Cut(dt, []byte("\n")) + if bytes.HasPrefix(line, []byte("#!")) { + return rest, true, nil + } + return dt, false, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go new file mode 100644 index 00000000..9f28a5a2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go @@ -0,0 +1,58 @@ +package parser + +import ( + "github.com/moby/buildkit/util/stack" + "github.com/pkg/errors" +) + +// ErrorLocation gives a location in source code that caused the error +type ErrorLocation struct { + Location []Range + error +} + +// Unwrap unwraps to the next error +func (e *ErrorLocation) Unwrap() error { + return e.error +} + +// Range is a code section between two positions +type Range struct { + Start Position + End Position +} + +// Position is a point in source code +type Position struct { + Line int + Character int +} + +func withLocation(err error, start, end int) error { + return WithLocation(err, toRanges(start, end)) +} + +// WithLocation extends an error with a source code location +func WithLocation(err error, location []Range) error { + if err == nil { + return nil + } + var el *ErrorLocation + if errors.As(err, &el) { + return err + } + return stack.Enable(&ErrorLocation{ + error: err, + Location: location, + }) +} + +func toRanges(start, end int) (r []Range) { + if end <= start { + end = start + } + for i := start; i <= end; i++ { + r = append(r, Range{Start: Position{Line: i}, End: Position{Line: i}}) + } + return +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go new file mode 100644 index 00000000..db8d0bda --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go @@ -0,0 +1,367 @@ +package parser + +// line parsers are 
dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "strings" + "unicode" + "unicode/utf8" + + "github.com/pkg/errors" +) + +var ( + errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only") +) + +const ( + commandLabel = "LABEL" +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *directives) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +func parseSubCommand(rest string, d *directives) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + child, err := newNodeFromLine(rest, d, nil) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string, d *directives) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.escapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.escapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. 
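+// Example (illustrative): both of the following forms are accepted, +// +// ENV name value (old form: the rest of the line is the value) +// ENV name=value name2=value2 (new form: one or more explicit pairs) +// +// and either form should yield a linked list of alternating key and value +// nodes (key1 -> value1 -> key2 -> value2).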
+func parseNameVal(rest string, key string, d *directives) (*Node, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil + } + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + parts := reWhitespace.Split(rest, 2) + if len(parts) < 2 { + return nil, errors.Errorf("%s must have two arguments", key) + } + return newKeyValueNode(parts[0], parts[1]), nil + } + + var rootNode *Node + var prevNode *Node + for _, word := range words { + if !strings.Contains(word, "=") { + return nil, errors.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + } + + parts := strings.SplitN(word, "=", 2) + node := newKeyValueNode(parts[0], parts[1]) + rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) + } + + return rootNode, nil +} + +func newKeyValueNode(key, value string) *Node { + return &Node{ + Value: key, + Next: &Node{Value: value}, + } +} + +func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { + if rootNode == nil { + rootNode = node + } + if prevNode != nil { + prevNode.Next = node + } + + prevNode = node.Next + return rootNode, prevNode +} + +func parseEnv(rest string, d *directives) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, "ENV", d) + return node, nil, err +} + +func parseLabel(rest string, d *directives) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, commandLabel, d) + return node, nil, err +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *directives) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string, d *directives) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range reWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parseString returns the remaining text, unmodified, as a single value node.
+func parseString(rest string, d *directives) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string, d *directives) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, errors.Errorf("Error parsing %q as a JSON array", rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string, d *directives) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest, d) +} + +// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. +func parseHealthConfig(rest string, d *directives) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go new file mode 100644 index 00000000..4a6129fd --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -0,0 +1,552 @@ +// The parser package implements a parser that transforms a raw byte-stream +// into a low-level Abstract Syntax Tree. +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" + + "github.com/moby/buildkit/frontend/dockerfile/command" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. 
Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Heredocs []Heredoc // extra heredoc content attachments + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends + PrevComment []string +} + +// Location return the location of node in source code +func (node *Node) Location() []Range { + return toRanges(node.StartLine, node.EndLine) +} + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := strings.ToLower(node.Value) + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + + return strings.TrimSpace(str) +} + +func (node *Node) lines(start, end int) { + node.StartLine = start + node.EndLine = end +} + +func (node *Node) canContainHeredoc() bool { + // check for compound commands, like ONBUILD + if ok := heredocCompoundDirectives[strings.ToLower(node.Value)]; ok { + if node.Next != nil && len(node.Next.Children) > 0 { + node = node.Next.Children[0] + } + } + + if ok := heredocDirectives[strings.ToLower(node.Value)]; !ok { + return false + } + if isJSON := node.Attributes["json"]; isJSON { + return false + } + + return true +} + +// AddChild adds a new child node, and updates line information +func (node *Node) AddChild(child *Node, startLine, endLine int) { + child.lines(startLine, endLine) + if node.StartLine < 0 { + node.StartLine = startLine + } + node.EndLine = endLine + node.Children = append(node.Children, child) +} + +type Heredoc struct { + Name string + FileDescriptor uint + Expand bool + Chomp bool + Content string +} + +var ( + dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) + reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + reComment = regexp.MustCompile(`^#.*$`) + reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`) + reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) +) + +// DefaultEscapeToken is the default escape token +const DefaultEscapeToken = '\\' + +var ( + // Directives allowed to contain heredocs + heredocDirectives = map[string]bool{ + command.Add: true, + command.Copy: true, + command.Run: true, + } + + // Directives allowed to contain directives containing heredocs + heredocCompoundDirectives = map[string]bool{ + command.Onbuild: true, + } +) + +// directives is the structure used during a build run to hold the state of +// parsing directives. 
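+// Example (illustrative): with the default escape token '\', the two physical +// lines "RUN echo hello \" and "world" are joined into the single logical line +// "RUN echo hello world" before being dispatched.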
+type directives struct { + parser DirectiveParser + escapeToken rune // Current escape token + lineContinuationRegex *regexp.Regexp // Current line continuation regex +} + +// setEscapeToken sets the default token for escaping characters and as line- +// continuation token in a Dockerfile. Only ` (backtick) and \ (backslash) are +// allowed as token. +func (d *directives) setEscapeToken(s string) error { + if s != "`" && s != `\` { + return errors.Errorf("invalid escape token '%s' does not match ` or \\", s) + } + d.escapeToken = rune(s[0]) + // The escape token is used both to escape characters in a line and as line + // continuation token. If it's the last non-whitespace token, it is used as + // line-continuation token, *unless* preceded by an escape-token. + // + // The second branch in the regular expression handles line-continuation + // tokens on their own line, which don't have any character preceding them. + // + // Due to Go lacking negative look-ahead matching, this regular expression + // does not currently handle a line-continuation token preceded by an *escaped* + // escape-token ("foo \\\"). + d.lineContinuationRegex = regexp.MustCompile(`([^\` + s + `])\` + s + `[ \t]*$|^\` + s + `[ \t]*$`) + return nil +} + +// possibleParserDirective looks for parser directives, eg '# escapeToken='. +// Parser directives must precede any builder instruction or other comments, +// and cannot be repeated. +func (d *directives) possibleParserDirective(line string) error { + directive, err := d.parser.ParseLine([]byte(line)) + if err != nil { + return err + } + if directive != nil && directive.Name == keyEscape { + return d.setEscapeToken(directive.Value) + } + return nil +} + +// newDefaultDirectives returns a new directives structure with the default escapeToken token +func newDefaultDirectives() *directives { + d := &directives{} + d.setEscapeToken(string(DefaultEscapeToken)) + return d +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *directives) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseStringsWhitespaceDelimited, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// newNodeFromLine splits the line into parts, and dispatches to a function +// based on the command and command arguments. A Node is created from the +// result of the dispatch. 
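+// Example (illustrative): the line "RUN echo hi" dispatches on "run" and, +// since the argument is not a JSON array, should produce a node with Value +// "RUN" whose Next holds the single value "echo hi".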
+func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) { + cmd, flags, args, err := splitCommand(line) + if err != nil { + return nil, err + } + + fn := dispatch[strings.ToLower(cmd)] + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + next, attrs, err := fn(args, d) + if err != nil { + return nil, err + } + + return &Node{ + Value: cmd, + Original: line, + Flags: flags, + Next: next, + Attributes: attrs, + PrevComment: comments, + }, nil +} + +// Result contains the bundled outputs from parsing a Dockerfile. +type Result struct { + AST *Node + EscapeToken rune + Warnings []Warning +} + +// Warning contains information to identify and locate a warning generated +// during parsing. +type Warning struct { + Short string + Detail [][]byte + URL string + Location *Range +} + +// PrintWarnings to the writer +func (r *Result) PrintWarnings(out io.Writer) { + if len(r.Warnings) == 0 { + return + } + for _, w := range r.Warnings { + fmt.Fprintf(out, "[WARNING]: %s\n", w.Short) + } + if len(r.Warnings) > 0 { + fmt.Fprintf(out, "[WARNING]: Empty continuation lines will become errors in a future release.\n") + } +} + +// Parse consumes lines from a provided Reader, parses each line into an AST +// and returns the results of doing so. +func Parse(rwc io.Reader) (*Result, error) { + d := newDefaultDirectives() + currentLine := 0 + root := &Node{StartLine: -1} + scanner := bufio.NewScanner(rwc) + scanner.Split(scanLines) + warnings := []Warning{} + var comments []string + + var err error + for scanner.Scan() { + bytesRead := scanner.Bytes() + if currentLine == 0 { + // First line, strip the byte-order-marker if present + bytesRead = bytes.TrimPrefix(bytesRead, utf8bom) + } + if isComment(bytesRead) { + comment := strings.TrimSpace(string(bytesRead[1:])) + if comment == "" { + comments = nil + } else { + comments = append(comments, comment) + } + } + bytesRead, err = processLine(d, bytesRead, true) + if err != nil { + return nil, withLocation(err, currentLine, 0) + } + currentLine++ + + startLine := currentLine + line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d) + if isEndOfLine && line == "" { + continue + } + + var hasEmptyContinuationLine bool + for !isEndOfLine && scanner.Scan() { + bytesRead, err := processLine(d, scanner.Bytes(), false) + if err != nil { + return nil, withLocation(err, currentLine, 0) + } + currentLine++ + + if isComment(scanner.Bytes()) { + // original line was a comment (processLine strips comments) + continue + } + if isEmptyContinuationLine(bytesRead) { + hasEmptyContinuationLine = true + continue + } + + continuationLine := string(bytesRead) + continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) + line += continuationLine + } + + if hasEmptyContinuationLine { + warnings = append(warnings, Warning{ + Short: "Empty continuation line found in: " + line, + Detail: [][]byte{[]byte("Empty continuation lines will become errors in a future release")}, + URL: "https://github.com/moby/moby/pull/33719", + Location: &Range{Start: Position{Line: currentLine}, End: Position{Line: currentLine}}, + }) + } + + child, err := newNodeFromLine(line, d, comments) + if err != nil { + return nil, withLocation(err, startLine, currentLine) + } + + if child.canContainHeredoc() { + heredocs, err := heredocsFromLine(line) + if err != nil { + return nil, withLocation(err, startLine, currentLine) + } + + for _, heredoc := range heredocs { + terminator := []byte(heredoc.Name) + terminated := false + 
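// Consume lines until one matches the heredoc terminator (for chomped + // "<<-" heredocs, leading tabs are stripped before comparing); everything + // read before the terminator accumulates into heredoc.Content. +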
for scanner.Scan() { + bytesRead := scanner.Bytes() + currentLine++ + + possibleTerminator := trimNewline(bytesRead) + if heredoc.Chomp { + possibleTerminator = trimLeadingTabs(possibleTerminator) + } + if bytes.Equal(possibleTerminator, terminator) { + terminated = true + break + } + heredoc.Content += string(bytesRead) + } + if !terminated { + return nil, withLocation(errors.New("unterminated heredoc"), startLine, currentLine) + } + + child.Heredocs = append(child.Heredocs, heredoc) + } + } + + root.AddChild(child, startLine, currentLine) + comments = nil + } + + if root.StartLine < 0 { + return nil, withLocation(errors.New("file with no instructions"), currentLine, 0) + } + + return &Result{ + AST: root, + Warnings: warnings, + EscapeToken: d.escapeToken, + }, withLocation(handleScannerError(scanner.Err()), currentLine, 0) +} + +// heredocFromMatch extracts a heredoc from a possible heredoc regex match. +func heredocFromMatch(match []string) (*Heredoc, error) { + if len(match) == 0 { + return nil, nil + } + + fd, _ := strconv.ParseUint(match[1], 10, 0) + chomp := match[2] == "-" + rest := match[3] + + if len(rest) == 0 { + return nil, nil + } + + shlex := shell.NewLex('\\') + shlex.SkipUnsetEnv = true + + // Attempt to parse the heredoc both with *and* without quotes. + // If there are quotes in one but not the other, then we know that some + // part of the heredoc word is quoted, so we shouldn't expand the content. + shlex.RawQuotes = false + words, err := shlex.ProcessWords(rest, []string{}) + if err != nil { + return nil, err + } + // quick sanity check that rest is a single word + if len(words) != 1 { + return nil, nil + } + + shlex.RawQuotes = true + wordsRaw, err := shlex.ProcessWords(rest, []string{}) + if err != nil { + return nil, err + } + if len(wordsRaw) != len(words) { + return nil, errors.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest) + } + + word := words[0] + wordQuoteCount := strings.Count(word, `'`) + strings.Count(word, `"`) + wordRaw := wordsRaw[0] + wordRawQuoteCount := strings.Count(wordRaw, `'`) + strings.Count(wordRaw, `"`) + + expand := wordQuoteCount == wordRawQuoteCount + + return &Heredoc{ + Name: word, + Expand: expand, + Chomp: chomp, + FileDescriptor: uint(fd), + }, nil +} + +// ParseHeredoc parses a heredoc word from a target string, returning the +// components from the doc. +func ParseHeredoc(src string) (*Heredoc, error) { + return heredocFromMatch(reHeredoc.FindStringSubmatch(src)) +} + +// MustParseHeredoc is a variant of ParseHeredoc that discards the error, if +// there was one present. +func MustParseHeredoc(src string) *Heredoc { + heredoc, _ := ParseHeredoc(src) + return heredoc +} + +func heredocsFromLine(line string) ([]Heredoc, error) { + shlex := shell.NewLex('\\') + shlex.RawQuotes = true + shlex.RawEscapes = true + shlex.SkipUnsetEnv = true + words, _ := shlex.ProcessWords(line, []string{}) + + var docs []Heredoc + for _, word := range words { + heredoc, err := ParseHeredoc(word) + if err != nil { + return nil, err + } + if heredoc != nil { + docs = append(docs, *heredoc) + } + } + return docs, nil +} + +// ChompHeredocContent chomps leading tabs from the heredoc.
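+// Example (illustrative): ChompHeredocContent("\thello\n\t\tworld\n") should +// return "hello\nworld\n", since every run of leading tabs is removed from +// each line.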
+func ChompHeredocContent(src string) string { + return reLeadingTabs.ReplaceAllString(src, "") +} + +func trimComments(src []byte) []byte { + return reComment.ReplaceAll(src, []byte{}) +} + +func trimLeadingWhitespace(src []byte) []byte { + return bytes.TrimLeftFunc(src, unicode.IsSpace) +} +func trimLeadingTabs(src []byte) []byte { + return bytes.TrimLeft(src, "\t") +} +func trimNewline(src []byte) []byte { + return bytes.TrimRight(src, "\r\n") +} + +func isComment(line []byte) bool { + return reComment.Match(trimLeadingWhitespace(trimNewline(line))) +} + +func isEmptyContinuationLine(line []byte) bool { + return len(trimLeadingWhitespace(trimNewline(line))) == 0 +} + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +func trimContinuationCharacter(line string, d *directives) (string, bool) { + if d.lineContinuationRegex.MatchString(line) { + line = d.lineContinuationRegex.ReplaceAllString(line, "$1") + return line, false + } + return line, true +} + +// TODO: remove stripLeftWhitespace after deprecation period. It seems silly +// to preserve whitespace on continuation lines. Why is that done? +func processLine(d *directives, token []byte, stripLeftWhitespace bool) ([]byte, error) { + token = trimNewline(token) + if stripLeftWhitespace { + token = trimLeadingWhitespace(token) + } + return trimComments(token), d.possibleParserDirective(string(token)) +} + +// Variation of bufio.ScanLines that preserves the line endings +func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + return i + 1, data[0 : i+1], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil +} + +func handleScannerError(err error) error { + switch err { + case bufio.ErrTooLong: + return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1) + default: + return err + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go new file mode 100644 index 00000000..c0261652 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go @@ -0,0 +1,117 @@ +package parser + +import ( + "strings" + "unicode" +) + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. 
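+// Example (illustrative): splitCommand("COPY --from=build /src /dst") should +// return cmd "COPY", flags ["--from=build"], and args "/src /dst".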
+func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := reWhitespace.Split(strings.TrimSpace(line), 2) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmdline[0], flags, strings.TrimSpace(args), nil +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found something with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest new file mode 100644 index 00000000..38534b0c --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest @@ -0,0 +1,238 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | error +A|he\'llo | he'llo +A|he\\'llo | error +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|"abc\\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | error +A|"hello\" | error +A|"hel'lo" | hel'lo +A|'hello | error +A|'hello\' | hello\ +A|'hello\there' | hello\there +A|'hello\\there' | hello\\there +A|"''" | '' +A|$. | $. +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. 
+A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|"he\$PWD" | he$PWD +A|"he\\$PWD" | he\/home +A|\${} | ${} +A|\${}aaa | ${}aaa +A|he\${} | he${} +A|he\${}xx | he${}xx +A|${} | error +A|${}aaa | error +A|he${} | error +A|he${}xx | error +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX?} | error +A|he${XXX:?} | error +A|he${PWD?} | he/home +A|he${PWD:?} | he/home +A|he${NULL?} | he +A|he${NULL:?} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'ë…•'하세요 | 안녕하세요 +A|안'녕하세요 | error +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | error +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | error +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | error +A|"안녕하세요\" | error +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | error +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. +A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | error +A|안녕${}xx | error +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 +A|${{aaa} | error +A|${aaa}} | } +A|${aaa | error +A|${{aaa:-bbb} | error +A|${aaa:-bbb}} | bbb} +A|${aaa:-bbb | error +A|${aaa:-bbb} | bbb +A|${aaa:-${bbb:-ccc}} | ccc +A|${aaa:-bbb ${foo} | error +A|${aaa:-bbb {foo} | bbb {foo +A|${:} | error +A|${:-bbb} | error +A|${:+bbb} | error + +# Positional parameters won't be set: +# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01 +A|$1 | +A|${1} | +A|${1:+bbb} | +A|${1:-bbb} | bbb +A|$2 | +A|${2} | +A|${2:+bbb} | +A|${2:-bbb} | bbb +A|$3 | +A|${3} | +A|${3:+bbb} | +A|${3:-bbb} | bbb +A|$4 | +A|${4} | +A|${4:+bbb} | +A|${4:-bbb} | bbb +A|$5 | +A|${5} | +A|${5:+bbb} | +A|${5:-bbb} | bbb +A|$6 | +A|${6} | +A|${6:+bbb} | +A|${6:-bbb} | bbb +A|$7 | +A|${7} | +A|${7:+bbb} | +A|${7:-bbb} | bbb +A|$8 | +A|${8} | +A|${8:+bbb} | +A|${8:-bbb} | bbb +A|$9 | +A|${9} | +A|${9:+bbb} | +A|${9:-bbb} | bbb +A|$999 | +A|${999} | +A|${999:+bbb} | +A|${999:-bbb} | bbb +A|$999aaa | aaa +A|${999}aaa | aaa +A|${999:+bbb}aaa | aaa +A|${999:-bbb}aaa | bbbaaa +A|$001 | +A|${001} | 
+A|${001:+bbb} | +A|${001:-bbb} | bbb +A|$001aaa | aaa +A|${001}aaa | aaa +A|${001:+bbb}aaa | aaa +A|${001:-bbb}aaa | bbbaaa + +# Special parameters won't be set in the Dockerfile: +# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 +A|$@ | +A|${@} | +A|${@:+bbb} | +A|${@:-bbb} | bbb +A|$@@@ | @@ +A|$@aaa | aaa +A|${@}aaa | aaa +A|${@:+bbb}aaa | aaa +A|${@:-bbb}aaa | bbbaaa +A|$* | +A|${*} | +A|${*:+bbb} | +A|${*:-bbb} | bbb +A|$# | +A|${#} | +A|${#:+bbb} | +A|${#:-bbb} | bbb +A|$? | +A|${?} | +A|${?:+bbb} | +A|${?:-bbb} | bbb +A|$- | +A|${-} | +A|${-:+bbb} | +A|${-:-bbb} | bbb +A|$$ | +A|${$} | +A|${$:+bbb} | +A|${$:-bbb} | bbb +A|$! | +A|${!} | +A|${!:+bbb} | +A|${!:-bbb} | bbb +A|$0 | +A|${0} | +A|${0:+bbb} | +A|${0:-bbb} | bbb diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go new file mode 100644 index 00000000..f9aca5d9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go @@ -0,0 +1,11 @@ +//go:build !windows +// +build !windows + +package shell + +// EqualEnvKeys compare two strings and returns true if they are equal. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. +func EqualEnvKeys(from, to string) bool { + return from == to +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go new file mode 100644 index 00000000..7bbed9b2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go @@ -0,0 +1,10 @@ +package shell + +import "strings" + +// EqualEnvKeys compare two strings and returns true if they are equal. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. +func EqualEnvKeys(from, to string) bool { + return strings.EqualFold(from, to) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go new file mode 100644 index 00000000..80806f8b --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -0,0 +1,474 @@ +package shell + +import ( + "bytes" + "fmt" + "strings" + "text/scanner" + "unicode" + + "github.com/pkg/errors" +) + +// Lex performs shell word splitting and variable expansion. +// +// Lex takes a string and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section +type Lex struct { + escapeToken rune + RawQuotes bool + RawEscapes bool + SkipProcessQuotes bool + SkipUnsetEnv bool +} + +// NewLex creates a new Lex which uses escapeToken to escape quotes. +func NewLex(escapeToken rune) *Lex { + return &Lex{escapeToken: escapeToken} +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. 
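+// Example (an illustrative sketch, not from the vendored source): +// +// NewLex('\\').ProcessWord("hi $name", []string{"name=world"}) +// +// should return "hi world", while single quotes suppress expansion, so +// ProcessWord("'hi $name'", ...) should return "hi $name" with the quotes +// stripped.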
+func (s *Lex) ProcessWord(word string, env []string) (string, error) { + word, _, err := s.process(word, BuildEnvs(env)) + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word', then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces, taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted), but ProcessWord retains spaces between words. +func (s *Lex) ProcessWords(word string, env []string) ([]string, error) { + _, words, err := s.process(word, BuildEnvs(env)) + return words, err +} + +// ProcessWordWithMap will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) { + word, _, err := s.process(word, env) + return word, err +} + +// ProcessWordWithMatches will use the 'env' list of environment variables, +// replace any env var references in 'word' and return the env vars that were used. +func (s *Lex) ProcessWordWithMatches(word string, env map[string]string) (string, map[string]struct{}, error) { + sw := s.init(word, env) + word, _, err := sw.process(word) + return word, sw.matches, err +} + +func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) { + _, words, err := s.process(word, env) + return words, err +} + +func (s *Lex) init(word string, env map[string]string) *shellWord { + sw := &shellWord{ + envs: env, + escapeToken: s.escapeToken, + skipUnsetEnv: s.SkipUnsetEnv, + skipProcessQuotes: s.SkipProcessQuotes, + rawQuotes: s.RawQuotes, + rawEscapes: s.RawEscapes, + matches: make(map[string]struct{}), + } + sw.scanner.Init(strings.NewReader(word)) + return sw +} + +func (s *Lex) process(word string, env map[string]string) (string, []string, error) { + sw := s.init(word, env) + return sw.process(word) +} + +type shellWord struct { + scanner scanner.Scanner + envs map[string]string + escapeToken rune + rawQuotes bool + rawEscapes bool + skipUnsetEnv bool + skipProcessQuotes bool + matches map[string]struct{} +} + +func (sw *shellWord) process(source string) (string, []string, error) { + word, words, err := sw.processStopOn(scanner.EOF) + if err != nil { + err = errors.Wrapf(err, "failed to process %q", source) + } + return word, words, err +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + for _, ch := range str { + w.addChar(ch) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word and stop when we get to the end of the +// word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result
bytes.Buffer + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '$': sw.processDollar, + } + if !sw.skipProcessQuotes { + charFuncMapping['\''] = sw.processSingleQuote + charFuncMapping['"'] = sw.processDoubleQuote + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + return result.String(), words.getWords(), nil + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result.WriteString(tmp) + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + if sw.rawEscapes { + words.addRawChar(ch) + result.WriteRune(ch) + } + + // '\' (default escape token, but ` allowed) escapes, except end of line + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result.WriteRune(ch) + } + } + if stopChar != scanner.EOF { + return "", []string{}, errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar)) + } + return result.String(), words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + // + // From the "sh" man page: + // Single Quotes + // Enclosing characters in single quotes preserves the literal meaning of + // all the characters (except single quotes, making it impossible to put + // single-quotes in a single-quoted string). + + var result bytes.Buffer + + ch := sw.scanner.Next() + if sw.rawQuotes { + result.WriteRune(ch) + } + + for { + ch = sw.scanner.Next() + switch ch { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching single-quote") + case '\'': + if sw.rawQuotes { + result.WriteRune(ch) + } + return result.String(), nil + } + result.WriteRune(ch) + } +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + // + // From the "sh" man page: + // Double Quotes + // Enclosing characters within double quotes preserves the literal meaning + // of all characters except dollarsign ($), backquote (`), and backslash + // (\). The backslash inside double quotes is historically weird, and + // serves to quote only the following characters: + // $ ` " \ . + // Otherwise it remains literal. 
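+ // Example (illustrative, with the default '\' escape token): + // "a\"b" -> a"b (escaped quote) + // "a\tb" -> a\tb (the backslash is kept; 't' is not escapable here) + // "a\$b" -> a$b (escaped dollar sign, so no expansion)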
+ + var result bytes.Buffer + + ch := sw.scanner.Next() + if sw.rawQuotes { + result.WriteRune(ch) + } + + for { + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching double-quote") + case '"': + ch := sw.scanner.Next() + if sw.rawQuotes { + result.WriteRune(ch) + } + return result.String(), nil + case '$': + value, err := sw.processDollar() + if err != nil { + return "", err + } + result.WriteString(value) + default: + ch := sw.scanner.Next() + if ch == sw.escapeToken { + if sw.rawEscapes { + result.WriteRune(ch) + } + + switch sw.scanner.Peek() { + case scanner.EOF: + // Ignore \ at end of word + continue + case '"', '$', sw.escapeToken: + // These chars can be escaped, all other \'s are left as-is + // Note: for now don't do anything special with ` chars. + // Not sure what to do with them anyway since we're not going + // to execute the text in there (not now anyway). + ch = sw.scanner.Next() + } + } + result.WriteRune(ch) + } + } +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + + // $xxx case + if sw.scanner.Peek() != '{' { + name := sw.processName() + if name == "" { + return "$", nil + } + value, found := sw.getEnv(name) + if !found && sw.skipUnsetEnv { + return "$" + name, nil + } + return value, nil + } + + sw.scanner.Next() + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("syntax error: missing '}'") + case '{', '}', ':': + // Invalid ${{xx}, ${:xx}, ${:}. ${} case + return "", errors.New("syntax error: bad substitution") + } + name := sw.processName() + ch := sw.scanner.Next() + chs := string(ch) + nullIsUnset := false + + switch ch { + case '}': + // Normal ${xx} case + value, set := sw.getEnv(name) + if !set && sw.skipUnsetEnv { + return fmt.Sprintf("${%s}", name), nil + } + return value, nil + case ':': + nullIsUnset = true + ch = sw.scanner.Next() + chs += string(ch) + fallthrough + case '+', '-', '?': + word, _, err := sw.processStopOn('}') + if err != nil { + if sw.scanner.Peek() == scanner.EOF { + return "", errors.New("syntax error: missing '}'") + } + return "", err + } + + // Grab the current value of the variable in question so we + // can use it to determine what to do based on the modifier + value, set := sw.getEnv(name) + if sw.skipUnsetEnv && !set { + return fmt.Sprintf("${%s%s%s}", name, chs, word), nil + } + + switch ch { + case '-': + if !set || (nullIsUnset && value == "") { + return word, nil + } + return value, nil + case '+': + if !set || (nullIsUnset && value == "") { + return "", nil + } + return word, nil + case '?': + if !set { + message := "is not allowed to be unset" + if word != "" { + message = word + } + return "", errors.Errorf("%s: %s", name, message) + } + if nullIsUnset && value == "" { + message := "is not allowed to be empty" + if word != "" { + message = word + } + return "", errors.Errorf("%s: %s", name, message) + } + return value, nil + default: + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) + } + default: + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) + } +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name bytes.Buffer + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if name.Len() == 0 && unicode.IsDigit(ch) { + for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) { + // Keep reading until the first 
non-digit character, or EOF + ch = sw.scanner.Next() + name.WriteRune(ch) + } + return name.String() + } + if name.Len() == 0 && isSpecialParam(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name.WriteRune(ch) + } + + return name.String() +} + +// isSpecialParam checks if the provided character is a special parameters, +// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 +func isSpecialParam(char rune) bool { + switch char { + case '@', '*', '#', '?', '-', '$', '!', '0': + // Special parameters + // http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 + return true + } + return false +} + +func (sw *shellWord) getEnv(name string) (string, bool) { + for key, value := range sw.envs { + if EqualEnvKeys(name, key) { + sw.matches[name] = struct{}{} + return value, true + } + } + return "", false +} + +func BuildEnvs(env []string) map[string]string { + envs := map[string]string{} + + for _, e := range env { + i := strings.Index(e, "=") + + if i < 0 { + envs[e] = "" + } else { + k := e[:i] + v := e[i+1:] + + // overwrite value if key already exists + envs[k] = v + } + } + + return envs +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest new file mode 100644 index 00000000..1fd9f194 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest @@ -0,0 +1,30 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | error +hello\" there | hello",there +hello"\\there" | hello\there +hello"\there" | hello\there +hello'\\there' | hello\\there +hello'\there' | hello\there +hello'$there' | hello$there diff --git a/vendor/github.com/moby/buildkit/util/stack/generate.go b/vendor/github.com/moby/buildkit/util/stack/generate.go new file mode 100644 index 00000000..f3e96778 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/generate.go @@ -0,0 +1,3 @@ +package stack + +//go:generate protoc -I=. -I=../../vendor/ --go_out=. 
--go_opt=paths=source_relative --go_opt=Mstack.proto=/util/stack stack.proto diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go new file mode 100644 index 00000000..fb9fc3dd --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -0,0 +1,182 @@ +package stack + +import ( + "fmt" + io "io" + "os" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/containerd/typeurl/v2" + "github.com/pkg/errors" +) + +var helpers map[string]struct{} +var helpersMu sync.RWMutex + +func init() { + typeurl.Register((*Stack)(nil), "github.com/moby/buildkit", "stack.Stack+json") + + helpers = map[string]struct{}{} +} + +var version string +var revision string + +func SetVersionInfo(v, r string) { + version = v + revision = r +} + +func Helper() { + var pc [1]uintptr + n := runtime.Callers(2, pc[:]) + if n == 0 { + return + } + frames := runtime.CallersFrames(pc[:n]) + frame, _ := frames.Next() + helpersMu.Lock() + helpers[frame.Function] = struct{}{} + helpersMu.Unlock() +} + +func Traces(err error) []*Stack { + var st []*Stack + + wrapped, ok := err.(interface { + Unwrap() error + }) + if ok { + st = Traces(wrapped.Unwrap()) + } + + if ste, ok := err.(interface { + StackTrace() errors.StackTrace + }); ok { + st = append(st, convertStack(ste.StackTrace())) + } + + if ste, ok := err.(interface { + StackTrace() *Stack + }); ok { + st = append(st, ste.StackTrace()) + } + + return st +} + +func Enable(err error) error { + if err == nil { + return nil + } + Helper() + if !hasLocalStackTrace(err) { + return errors.WithStack(err) + } + return err +} + +func Wrap(err error, s *Stack) error { + return &withStack{stack: s, error: err} +} + +func hasLocalStackTrace(err error) bool { + wrapped, ok := err.(interface { + Unwrap() error + }) + if ok && hasLocalStackTrace(wrapped.Unwrap()) { + return true + } + + _, ok = err.(interface { + StackTrace() errors.StackTrace + }) + return ok +} + +func Formatter(err error) fmt.Formatter { + return &formatter{err} +} + +type formatter struct { + error +} + +func (w *formatter) Format(s fmt.State, verb rune) { + if w.error == nil { + fmt.Fprintf(s, "%v", w.error) + return + } + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%s\n", w.Error()) + for _, stack := range Traces(w.error) { + fmt.Fprintf(s, "%d %s %s\n", stack.Pid, stack.Version, strings.Join(stack.Cmdline, " ")) + for _, f := range stack.Frames { + fmt.Fprintf(s, "%s\n\t%s:%d\n", f.Name, f.File, f.Line) + } + fmt.Fprintln(s) + } + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +func convertStack(s errors.StackTrace) *Stack { + var out Stack + helpersMu.RLock() + defer helpersMu.RUnlock() + for _, f := range s { + dt, err := f.MarshalText() + if err != nil { + continue + } + p := strings.SplitN(string(dt), " ", 2) + if len(p) != 2 { + continue + } + if _, ok := helpers[p[0]]; ok { + continue + } + idx := strings.LastIndexByte(p[1], ':') + if idx == -1 { + continue + } + line, err := strconv.ParseInt(p[1][idx+1:], 10, 32) + if err != nil { + continue + } + out.Frames = append(out.Frames, &Frame{ + Name: p[0], + File: p[1][:idx], + Line: int32(line), + }) + } + out.Cmdline = os.Args + out.Pid = int32(os.Getpid()) + out.Version = version + out.Revision = revision + return &out +} + +type withStack struct { + stack *Stack + error +} + +func (e *withStack) Unwrap() error { + return e.error +} + +func (e *withStack) StackTrace() *Stack { + return 
e.stack +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go new file mode 100644 index 00000000..43809d48 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -0,0 +1,261 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.11.4 +// source: stack.proto + +package stack + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Stack struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` + Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` + Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (x *Stack) Reset() { + *x = Stack{} + if protoimpl.UnsafeEnabled { + mi := &file_stack_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stack) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stack) ProtoMessage() {} + +func (x *Stack) ProtoReflect() protoreflect.Message { + mi := &file_stack_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stack.ProtoReflect.Descriptor instead. 
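Stepping back to stack.go above for a moment: a minimal sketch, not part of the vendored code, of how Enable, Traces and Formatter compose. The error message is invented; everything else uses only the APIs defined in this file and in stack.pb.go.

package main

import (
    "fmt"
    "os"

    "github.com/moby/buildkit/util/stack"
    "github.com/pkg/errors"
)

func main() {
    // github.com/pkg/errors records a stack at New, so Enable detects it via
    // hasLocalStackTrace and returns the error unchanged; a plain stdlib
    // error would be wrapped with errors.WithStack instead.
    err := stack.Enable(errors.New("fetch failed"))

    // Traces walks the Unwrap chain and converts every recorded trace into
    // the protobuf Stack type, filling Pid, Cmdline, Version and the frames.
    for _, st := range stack.Traces(err) {
        fmt.Println(st.Pid, len(st.Frames))
    }

    // %+v on the Formatter prints one block per collected stack.
    fmt.Fprintf(os.Stderr, "%+v\n", stack.Formatter(err))
}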
+func (*Stack) Descriptor() ([]byte, []int) { + return file_stack_proto_rawDescGZIP(), []int{0} +} + +func (x *Stack) GetFrames() []*Frame { + if x != nil { + return x.Frames + } + return nil +} + +func (x *Stack) GetCmdline() []string { + if x != nil { + return x.Cmdline + } + return nil +} + +func (x *Stack) GetPid() int32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *Stack) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Stack) GetRevision() string { + if x != nil { + return x.Revision + } + return "" +} + +type Frame struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` + Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` +} + +func (x *Frame) Reset() { + *x = Frame{} + if protoimpl.UnsafeEnabled { + mi := &file_stack_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Frame) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Frame) ProtoMessage() {} + +func (x *Frame) ProtoReflect() protoreflect.Message { + mi := &file_stack_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Frame.ProtoReflect.Descriptor instead. +func (*Frame) Descriptor() ([]byte, []int) { + return file_stack_proto_rawDescGZIP(), []int{1} +} + +func (x *Frame) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Frame) GetFile() string { + if x != nil { + return x.File + } + return "" +} + +func (x *Frame) GetLine() int32 { + if x != nil { + return x.Line + } + return 0 +} + +var File_stack_proto protoreflect.FileDescriptor + +var file_stack_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x24, + 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x06, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 
0x6f, 0x33, +} + +var ( + file_stack_proto_rawDescOnce sync.Once + file_stack_proto_rawDescData = file_stack_proto_rawDesc +) + +func file_stack_proto_rawDescGZIP() []byte { + file_stack_proto_rawDescOnce.Do(func() { + file_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_stack_proto_rawDescData) + }) + return file_stack_proto_rawDescData +} + +var file_stack_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_stack_proto_goTypes = []interface{}{ + (*Stack)(nil), // 0: stack.Stack + (*Frame)(nil), // 1: stack.Frame +} +var file_stack_proto_depIdxs = []int32{ + 1, // 0: stack.Stack.frames:type_name -> stack.Frame + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_stack_proto_init() } +func file_stack_proto_init() { + if File_stack_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_stack_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stack); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_stack_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Frame); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_stack_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_stack_proto_goTypes, + DependencyIndexes: file_stack_proto_depIdxs, + MessageInfos: file_stack_proto_msgTypes, + }.Build() + File_stack_proto = out.File + file_stack_proto_rawDesc = nil + file_stack_proto_goTypes = nil + file_stack_proto_depIdxs = nil +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.proto b/vendor/github.com/moby/buildkit/util/stack/stack.proto new file mode 100644 index 00000000..9c63bc36 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/stack.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package stack; + +message Stack { + repeated Frame frames = 1; + repeated string cmdline = 2; + int32 pid = 3; + string version = 4; + string revision = 5; +} + +message Frame { + string Name = 1; + string File = 2; + int32 Line = 3; +} \ No newline at end of file diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go index 59332b07..b32b5c9b 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go @@ -5,15 +5,19 @@ import ( "fmt" "io" "os" + "runtime" "strconv" "strings" + "sync" + + "golang.org/x/sys/unix" ) // GetMountsFromReader retrieves a list of mounts from the // reader provided, with an optional filter applied (use nil // for no filter). This can be useful in tests or benchmarks // that provide fake mountinfo data, or when a source other -// than /proc/self/mountinfo needs to be read from. +// than /proc/thread-self/mountinfo needs to be read from. // // This function is Linux-specific. 
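The reader-based entry point documented above makes the parser easy to exercise without a live /proc. A minimal sketch against one fabricated mountinfo(5) line; the device and mount data are invented:

package main

import (
    "fmt"
    "strings"

    "github.com/moby/sys/mountinfo"
)

func main() {
    // Fields: id parent major:minor root mountpoint opts [optional...] - fstype source vfsopts
    fake := "22 1 8:1 / / rw,relatime - ext4 /dev/sda1 rw\n"

    // FSTypeFilter skips entries whose filesystem type doesn't match.
    mounts, err := mountinfo.GetMountsFromReader(
        strings.NewReader(fake), mountinfo.FSTypeFilter("ext4"))
    if err != nil {
        panic(err)
    }
    for _, m := range mounts {
        fmt.Println(m.Mountpoint, m.FSType, m.Source)
    }
}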
func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { @@ -127,8 +131,40 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { return out, nil } -func parseMountTable(filter FilterFunc) ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") +var ( + haveProcThreadSelf bool + haveProcThreadSelfOnce sync.Once +) + +func parseMountTable(filter FilterFunc) (_ []*Info, err error) { + haveProcThreadSelfOnce.Do(func() { + _, err := os.Stat("/proc/thread-self/mountinfo") + haveProcThreadSelf = err == nil + }) + + // We need to lock ourselves to the current OS thread in order to make sure + // that the thread referenced by /proc/thread-self stays alive until we + // finish parsing the file. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var f *os.File + if haveProcThreadSelf { + f, err = os.Open("/proc/thread-self/mountinfo") + } else { + // On pre-3.17 kernels (such as CentOS 7), we don't have + // /proc/thread-self/ so we need to manually construct + // /proc/self/task/<tid>/ as a fallback. + f, err = os.Open("/proc/self/task/" + strconv.Itoa(unix.Gettid()) + "/mountinfo") + if os.IsNotExist(err) { + // If /proc/self/task/... failed, it means that our active pid + // namespace doesn't match the pid namespace of the /proc mount. In + // this case we just have to make do with /proc/self, since there + // is no other way of figuring out our tid in a parent pid + // namespace on pre-3.17 kernels. + f, err = os.Open("/proc/self/mountinfo") + } + } if err != nil { return nil, err } @@ -158,10 +194,10 @@ func PidMountInfo(pid int) ([]*Info, error) { // A few specific characters in mountinfo path entries (root and mountpoint) // are escaped using a backslash followed by a character's ascii code in octal. // -// space -- as \040 -// tab (aka \t) -- as \011 -// newline (aka \n) -- as \012 -// backslash (aka \\) -- as \134 +// space -- as \040 +// tab (aka \t) -- as \011 +// newline (aka \n) -- as \012 +// backslash (aka \\) -- as \134 // // This function converts path from mountinfo back, i.e. it unescapes the above sequences. func unescape(path string) (string, error) { diff --git a/vendor/github.com/moby/sys/user/LICENSE b/vendor/github.com/moby/sys/user/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/moby/sys/user/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go rename to vendor/github.com/moby/sys/user/lookup_unix.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/moby/sys/user/user.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/user.go rename to vendor/github.com/moby/sys/user/user.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go rename to vendor/github.com/moby/sys/user/user_fuzzer.go diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE index 5c97abce..c29775c0 100644 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ b/vendor/github.com/opencontainers/runc/NOTICE @@ -8,9 +8,9 @@ The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. +United States and other governments. It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. +violate applicable laws. For more information, please see http://www.bis.doc.gov diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go index 8b1483c7..17d36ed1 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go @@ -26,14 +26,19 @@ func isEnabled() bool { } func setProcAttr(attr, value string) error { - // Under AppArmor you can only change your own attr, so use /proc/self/ - // instead of /proc/<tid>/ like libapparmor does - attrPath := "/proc/self/attr/apparmor/" + attr - if _, err := os.Stat(attrPath); errors.Is(err, os.ErrNotExist) { + attr = utils.CleanPath(attr) + attrSubPath := "attr/apparmor/" + attr + if _, err := os.Stat("/proc/self/" + attrSubPath); errors.Is(err, os.ErrNotExist) { // fall back to the old convention - attrPath = "/proc/self/attr/" + attr + attrSubPath = "attr/" + attr } + // Under AppArmor you can only change your own attr, so there's no reason + // to not use /proc/thread-self/ (instead of /proc/<tid>/, like libapparmor + // does). + attrPath, closer := utils.ProcThreadSelf(attrSubPath) + defer closer() + f, err := os.OpenFile(attrPath, os.O_WRONLY, 0) if err != nil { return err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go index fa195bf9..865344f9 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go @@ -2,8 +2,8 @@ package configs import "fmt" -// blockIODevice holds major:minor format supported in blkio cgroup -type blockIODevice struct { +// BlockIODevice holds major:minor format supported in blkio cgroup.
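The rename to an exported BlockIODevice (declared just below) is what lets code outside the configs package build complete WeightDevice and ThrottleDevice values; previously the embedded struct was unexported, so they could only be built through helpers inside the package. A minimal sketch with made-up device numbers and rate:

package main

import (
    "fmt"

    "github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
    // 8:0 conventionally addresses /dev/sda; the 2 MiB/s rate is illustrative.
    td := &configs.ThrottleDevice{
        BlockIODevice: configs.BlockIODevice{Major: 8, Minor: 0},
        Rate:          2 << 20,
    }
    // Print in the "major:minor rate" form the blkio cgroup expects.
    fmt.Printf("%d:%d %d\n", td.Major, td.Minor, td.Rate)
}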
+type BlockIODevice struct { // Major is the device's major number Major int64 `json:"major"` // Minor is the device's minor number @@ -12,7 +12,7 @@ type blockIODevice struct { // WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair type WeightDevice struct { - blockIODevice + BlockIODevice // Weight is the bandwidth rate for the device, range is from 10 to 1000 Weight uint16 `json:"weight"` // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only @@ -41,7 +41,7 @@ func (wd *WeightDevice) LeafWeightString() string { // ThrottleDevice struct holds a `major:minor rate_per_second` pair type ThrottleDevice struct { - blockIODevice + BlockIODevice // Rate is the IO rate limit per cgroup per device Rate uint64 `json:"rate"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go index 2d4a8987..4a34cf76 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -69,6 +69,9 @@ type Resources struct { // CPU hardcap limit (in usecs). Allowed cpu time in a given period. CpuQuota int64 `json:"cpu_quota"` + // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a given period. + CpuBurst *uint64 `json:"cpu_burst"` //nolint:revive + // CPU period to be used for hardcapping (in usecs). 0 to use system default. CpuPeriod uint64 `json:"cpu_period"` @@ -84,6 +87,9 @@ type Resources struct { // MEM to use CpusetMems string `json:"cpuset_mems"` + // cgroup SCHED_IDLE + CPUIdle *int64 `json:"cpu_idle,omitempty"` + // Process limit; set <= `0' to disable limit. PidsLimit int64 `json:"pids_limit"` @@ -155,4 +161,9 @@ type Resources struct { // during Set() to figure out whether the freeze is required. Those // methods may be relatively slow, thus this flag. SkipFreezeOnSet bool `json:"-"` + + // MemoryCheckBeforeUpdate is a flag for cgroup v2 managers to check + // if the new memory limits (Memory and MemorySwap) being set are lower + // than the current memory usage, and reject if so. + MemoryCheckBeforeUpdate bool `json:"memory_check_before_update"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 6ebf5ec7..22fe0f9b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -8,6 +8,7 @@ import ( "time" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runtime-spec/specs-go" @@ -31,12 +32,13 @@ type IDMap struct { // for syscalls. Additional architectures can be added by specifying them in // Architectures. 
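The new Flags field on the Seccomp struct (declared just below) carries spec-level seccomp filter flags through the config. A minimal sketch of a config using it; the flag chosen is only illustrative, and whether it is honored is up to the seccomp loader:

package main

import (
    "fmt"

    "github.com/opencontainers/runc/libcontainer/configs"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    sc := &configs.Seccomp{
        DefaultAction: configs.Allow,
        // SECCOMP_FILTER_FLAG_SPEC_ALLOW, as named by the runtime spec.
        Flags: []specs.LinuxSeccompFlag{specs.LinuxSeccompFlagSpecAllow},
    }
    fmt.Println(len(sc.Flags), sc.DefaultAction)
}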
type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` - DefaultErrnoRet *uint `json:"default_errno_ret"` - ListenerPath string `json:"listener_path,omitempty"` - ListenerMetadata string `json:"listener_metadata,omitempty"` + DefaultAction Action `json:"default_action"` + Architectures []string `json:"architectures"` + Flags []specs.LinuxSeccompFlag `json:"flags"` + Syscalls []*Syscall `json:"syscalls"` + DefaultErrnoRet *uint `json:"default_errno_ret"` + ListenerPath string `json:"listener_path,omitempty"` + ListenerMetadata string `json:"listener_metadata,omitempty"` } // Action is taken upon rule match in Seccomp @@ -83,9 +85,6 @@ type Syscall struct { Args []*Arg `json:"args"` } -// TODO Windows. Many of these fields should be factored out into those parts -// which are common across platforms, and those which are platform specific. - // Config defines configuration options for executing a process inside a contained environment. type Config struct { // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs @@ -121,6 +120,9 @@ type Config struct { // Hostname optionally sets the container's hostname if provided Hostname string `json:"hostname"` + // Domainname optionally sets the container's domainname if provided + Domainname string `json:"domainname"` + // Namespaces specifies the container's namespaces that it should setup when cloning the init process // If a namespace is not provided that namespace is shared from the container's parent process Namespaces Namespaces `json:"namespaces"` @@ -158,11 +160,11 @@ type Config struct { // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ OomScoreAdj *int `json:"oom_score_adj,omitempty"` - // UidMappings is an array of User ID mappings for User Namespaces - UidMappings []IDMap `json:"uid_mappings"` + // UIDMappings is an array of User ID mappings for User Namespaces + UIDMappings []IDMap `json:"uid_mappings"` - // GidMappings is an array of Group ID mappings for User Namespaces - GidMappings []IDMap `json:"gid_mappings"` + // GIDMappings is an array of Group ID mappings for User Namespaces + GIDMappings []IDMap `json:"gid_mappings"` // MaskPaths specifies paths within the container's rootfs to mask over with a bind // mount pointing to /dev/null as to prevent reads of the file. @@ -211,8 +213,87 @@ type Config struct { // RootlessCgroups is set when unlikely to have the full access to cgroups. // When RootlessCgroups is set, cgroups errors are ignored. RootlessCgroups bool `json:"rootless_cgroups,omitempty"` + + // TimeOffsets specifies the offset for supporting time namespaces. + TimeOffsets map[string]specs.LinuxTimeOffset `json:"time_offsets,omitempty"` + + // Scheduler represents the scheduling attributes for a process. + Scheduler *Scheduler `json:"scheduler,omitempty"` + + // Personality contains configuration for the Linux personality syscall. + Personality *LinuxPersonality `json:"personality,omitempty"` + + // IOPriority is the container's I/O priority. + IOPriority *IOPriority `json:"io_priority,omitempty"` } +// Scheduler is based on the Linux sched_setattr(2) syscall. +type Scheduler = specs.Scheduler + +// ToSchedAttr is to convert *configs.Scheduler to *unix.SchedAttr. 
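ToSchedAttr, defined immediately below this note, is the bridge from the spec-level scheduler block to a unix.SchedAttr; on Linux the result would typically be handed to unix.SchedSetAttr. A minimal sketch with an illustrative SCHED_FIFO policy, flag and priority:

package main

import (
    "fmt"

    "github.com/opencontainers/runc/libcontainer/configs"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    sched := &configs.Scheduler{
        Policy:   specs.SchedFIFO,
        Flags:    []specs.LinuxSchedulerFlag{specs.SchedFlagResetOnFork},
        Priority: 42,
    }
    attr, err := configs.ToSchedAttr(sched)
    if err != nil {
        panic(err)
    }
    // Per the switch tables in ToSchedAttr: SCHED_FIFO becomes policy 1,
    // and reset-on-fork sets flag bit 0x01.
    fmt.Printf("policy=%d flags=%#x prio=%d\n", attr.Policy, attr.Flags, attr.Priority)
}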
+func ToSchedAttr(scheduler *Scheduler) (*unix.SchedAttr, error) { + var policy uint32 + switch scheduler.Policy { + case specs.SchedOther: + policy = 0 + case specs.SchedFIFO: + policy = 1 + case specs.SchedRR: + policy = 2 + case specs.SchedBatch: + policy = 3 + case specs.SchedISO: + policy = 4 + case specs.SchedIdle: + policy = 5 + case specs.SchedDeadline: + policy = 6 + default: + return nil, fmt.Errorf("invalid scheduler policy: %s", scheduler.Policy) + } + + var flags uint64 + for _, flag := range scheduler.Flags { + switch flag { + case specs.SchedFlagResetOnFork: + flags |= 0x01 + case specs.SchedFlagReclaim: + flags |= 0x02 + case specs.SchedFlagDLOverrun: + flags |= 0x04 + case specs.SchedFlagKeepPolicy: + flags |= 0x08 + case specs.SchedFlagKeepParams: + flags |= 0x10 + case specs.SchedFlagUtilClampMin: + flags |= 0x20 + case specs.SchedFlagUtilClampMax: + flags |= 0x40 + default: + return nil, fmt.Errorf("invalid scheduler flag: %s", flag) + } + } + + return &unix.SchedAttr{ + Size: unix.SizeofSchedAttr, + Policy: policy, + Flags: flags, + Nice: scheduler.Nice, + Priority: uint32(scheduler.Priority), + Runtime: scheduler.Runtime, + Deadline: scheduler.Deadline, + Period: scheduler.Period, + }, nil +} + +var IOPrioClassMapping = map[specs.IOPriorityClass]int{ + specs.IOPRIO_CLASS_RT: 1, + specs.IOPRIO_CLASS_BE: 2, + specs.IOPRIO_CLASS_IDLE: 3, +} + +type IOPriority = specs.LinuxIOPriority + type ( HookName string HookList []Hook @@ -277,6 +358,7 @@ type Capabilities struct { Ambient []string } +// Deprecated: use (Hooks).Run instead. func (hooks HookList) RunHooks(state *specs.State) error { for i, h := range hooks { if err := h.Run(state); err != nil { @@ -333,6 +415,18 @@ func (hooks *Hooks) MarshalJSON() ([]byte, error) { }) } +// Run executes all hooks for the given hook name. +func (hooks Hooks) Run(name HookName, state *specs.State) error { + list := hooks[name] + for i, h := range list { + if err := h.Run(state); err != nil { + return fmt.Errorf("error running %s hook #%d: %w", name, i, err) + } + } + + return nil +} + type Hook interface { // Run executes the hook with the provided state. Run(*specs.State) error @@ -393,7 +487,7 @@ func (c Command) Run(s *specs.State) error { go func() { err := cmd.Wait() if err != nil { - err = fmt.Errorf("error running hook: %w, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) + err = fmt.Errorf("%w, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) } errC <- err }() diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go index 51fe9407..e401f533 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go @@ -7,22 +7,33 @@ import ( ) var ( - errNoUIDMap = errors.New("User namespaces enabled, but no uid mappings found.") - errNoUserMap = errors.New("User namespaces enabled, but no user mapping found.") - errNoGIDMap = errors.New("User namespaces enabled, but no gid mappings found.") - errNoGroupMap = errors.New("User namespaces enabled, but no group mapping found.") + errNoUIDMap = errors.New("user namespaces enabled, but no uid mappings found") + errNoGIDMap = errors.New("user namespaces enabled, but no gid mappings found") ) +// Please check https://man7.org/linux/man-pages/man2/personality.2.html for const details. 
+// https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/personality.h +const ( + PerLinux = 0x0000 + PerLinux32 = 0x0008 +) + +type LinuxPersonality struct { + // Domain for the personality + // can only contain values "LINUX" and "LINUX32" + Domain int `json:"domain"` +} + // HostUID gets the translated uid for the process on host which could be // different when user namespaces are enabled. func (c Config) HostUID(containerId int) (int, error) { if c.Namespaces.Contains(NEWUSER) { - if c.UidMappings == nil { + if len(c.UIDMappings) == 0 { return -1, errNoUIDMap } - id, found := c.hostIDFromMapping(int64(containerId), c.UidMappings) + id, found := c.hostIDFromMapping(int64(containerId), c.UIDMappings) if !found { - return -1, errNoUserMap + return -1, fmt.Errorf("user namespaces enabled, but no mapping found for uid %d", containerId) } // If we are a 32-bit binary running on a 64-bit system, it's possible // the mapped user is too large to store in an int, which means we @@ -47,12 +58,12 @@ func (c Config) HostRootUID() (int, error) { // different when user namespaces are enabled. func (c Config) HostGID(containerId int) (int, error) { if c.Namespaces.Contains(NEWUSER) { - if c.GidMappings == nil { + if len(c.GIDMappings) == 0 { return -1, errNoGIDMap } - id, found := c.hostIDFromMapping(int64(containerId), c.GidMappings) + id, found := c.hostIDFromMapping(int64(containerId), c.GIDMappings) if !found { - return -1, errNoGroupMap + return -1, fmt.Errorf("user namespaces enabled, but no mapping found for gid %d", containerId) } // If we are a 32-bit binary running on a 64-bit system, it's possible // the mapped user is too large to store in an int, which means we diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go index 784c6182..bfd356e4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go @@ -1,48 +1,7 @@ package configs -import "golang.org/x/sys/unix" - const ( // EXT_COPYUP is a directive to copy up the contents of a directory when // a tmpfs is mounted over it. - EXT_COPYUP = 1 << iota //nolint:golint // ignore "don't use ALL_CAPS" warning + EXT_COPYUP = 1 << iota //nolint:golint,revive // ignore "don't use ALL_CAPS" warning ) - -type Mount struct { - // Source path for the mount. - Source string `json:"source"` - - // Destination path for the mount inside the container. - Destination string `json:"destination"` - - // Device the mount is for. - Device string `json:"device"` - - // Mount flags. - Flags int `json:"flags"` - - // Propagation Flags - PropagationFlags []int `json:"propagation_flags"` - - // Mount data applied to the mount. - Data string `json:"data"` - - // Relabel source if set, "z" indicates shared, "Z" indicates unshared. - Relabel string `json:"relabel"` - - // RecAttr represents mount properties to be applied recursively (AT_RECURSIVE), see mount_setattr(2). - RecAttr *unix.MountAttr `json:"rec_attr"` - - // Extensions are additional flags that are specific to runc. - Extensions int `json:"extensions"` - - // Optional Command to be run before Source is mounted. - PremountCmds []Command `json:"premount_cmds"` - - // Optional Command to be run after Source is mounted. 
- PostmountCmds []Command `json:"postmount_cmds"` -} - -func (m *Mount) IsBind() bool { - return m.Flags&unix.MS_BIND != 0 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go new file mode 100644 index 00000000..b69e9ab2 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go @@ -0,0 +1,66 @@ +package configs + +import "golang.org/x/sys/unix" + +type MountIDMapping struct { + // Recursive indicates if the mapping needs to be recursive. + Recursive bool `json:"recursive"` + + // UserNSPath is a path to a user namespace that indicates the necessary + // id-mappings for MOUNT_ATTR_IDMAP. If set to non-"", UIDMappings and + // GIDMappings must be set to nil. + UserNSPath string `json:"userns_path,omitempty"` + + // UIDMappings is the uid mapping set for this mount, to be used with + // MOUNT_ATTR_IDMAP. + UIDMappings []IDMap `json:"uid_mappings,omitempty"` + + // GIDMappings is the gid mapping set for this mount, to be used with + // MOUNT_ATTR_IDMAP. + GIDMappings []IDMap `json:"gid_mappings,omitempty"` +} + +type Mount struct { + // Source path for the mount. + Source string `json:"source"` + + // Destination path for the mount inside the container. + Destination string `json:"destination"` + + // Device the mount is for. + Device string `json:"device"` + + // Mount flags. + Flags int `json:"flags"` + + // Mount flags that were explicitly cleared in the configuration (meaning + // the user explicitly requested that these flags *not* be set). + ClearedFlags int `json:"cleared_flags"` + + // Propagation Flags + PropagationFlags []int `json:"propagation_flags"` + + // Mount data applied to the mount. + Data string `json:"data"` + + // Relabel source if set, "z" indicates shared, "Z" indicates unshared. + Relabel string `json:"relabel"` + + // RecAttr represents mount properties to be applied recursively (AT_RECURSIVE), see mount_setattr(2). + RecAttr *unix.MountAttr `json:"rec_attr"` + + // Extensions are additional flags that are specific to runc. + Extensions int `json:"extensions"` + + // Mapping is the MOUNT_ATTR_IDMAP configuration for the mount. If non-nil, + // the mount is configured to use MOUNT_ATTR_IDMAP-style id mappings. 
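The IDMapping field, whose declaration continues right below, is how a mount now opts into MOUNT_ATTR_IDMAP. A minimal sketch of a bind mount whose ownership is shifted by 100000; the paths and ranges are invented, and actually applying such a mapping requires a kernel with id-mapped mount support:

package main

import (
    "fmt"

    "github.com/opencontainers/runc/libcontainer/configs"
    "golang.org/x/sys/unix"
)

func main() {
    m := &configs.Mount{
        Source:      "/var/lib/demo/rootfs",
        Destination: "/mnt",
        Device:      "bind",
        Flags:       unix.MS_BIND,
        IDMapping: &configs.MountIDMapping{
            UIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
            GIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
        },
    }
    fmt.Println(m.IsBind(), m.IsIDMapped()) // true true
}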
+ IDMapping *MountIDMapping `json:"id_mapping,omitempty"` +} + +func (m *Mount) IsBind() bool { + return m.Flags&unix.MS_BIND != 0 +} + +func (m *Mount) IsIDMapped() bool { + return m.IDMapping != nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go new file mode 100644 index 00000000..21541912 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go @@ -0,0 +1,10 @@ +//go:build !linux +// +build !linux + +package configs + +type Mount struct{} + +func (m *Mount) IsBind() bool { + return false +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go index d52d6fcd..898f96fd 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go @@ -14,6 +14,7 @@ const ( NEWIPC NamespaceType = "NEWIPC" NEWUSER NamespaceType = "NEWUSER" NEWCGROUP NamespaceType = "NEWCGROUP" + NEWTIME NamespaceType = "NEWTIME" ) var ( @@ -38,6 +39,8 @@ func NsName(ns NamespaceType) string { return "uts" case NEWCGROUP: return "cgroup" + case NEWTIME: + return "time" } return "" } @@ -56,6 +59,9 @@ func IsNamespaceSupported(ns NamespaceType) bool { if nsFile == "" { return false } + // We don't need to use /proc/thread-self here because the list of + // namespace types is unrelated to the thread. This lets us avoid having to + // do runtime.LockOSThread. _, err := os.Stat("/proc/self/ns/" + nsFile) // a namespace is supported if it exists and we have permissions to read it supported = err == nil @@ -72,6 +78,7 @@ func NamespaceTypes() []NamespaceType { NEWPID, NEWNS, NEWCGROUP, + NEWTIME, } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go index 0516dba8..15d8046f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go @@ -17,6 +17,7 @@ var namespaceInfo = map[NamespaceType]int{ NEWUTS: unix.CLONE_NEWUTS, NEWPID: unix.CLONE_NEWPID, NEWCGROUP: unix.CLONE_NEWCGROUP, + NEWTIME: unix.CLONE_NEWTIME, } // CloneFlags parses the container's Namespaces options to set the correct @@ -31,3 +32,15 @@ func (n *Namespaces) CloneFlags() uintptr { } return uintptr(flag) } + +// IsPrivate tells whether the namespace of type t is configured as private +// (i.e. it exists and is not shared). +func (n Namespaces) IsPrivate(t NamespaceType) bool { + for _, v := range n { + if v.Type == t { + return v.Path == "" + } + } + // Not found, so implicitly sharing a parent namespace. + return false +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go new file mode 100644 index 00000000..c6cd4434 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go @@ -0,0 +1,81 @@ +package user + +import ( + "io" + + "github.com/moby/sys/user" +) + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. 
+func LookupUser(username string) (user.User, error) { + return user.LookupUser(username) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupUid +// returns an error. +func LookupUid(uid int) (user.User, error) { //nolint:revive // ignore var-naming: func LookupUid should be LookupUID + return user.LookupUid(uid) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (user.Group, error) { + return user.LookupGroup(groupname) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (user.Group, error) { + return user.LookupGid(gid) +} + +func GetPasswdPath() (string, error) { + return user.GetPasswdPath() +} + +func GetPasswd() (io.ReadCloser, error) { + return user.GetPasswd() +} + +func GetGroupPath() (string, error) { + return user.GetGroupPath() +} + +func GetGroup() (io.ReadCloser, error) { + return user.GetGroup() +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (user.User, error) { + return user.CurrentUser() +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (user.Group, error) { + return user.CurrentGroup() +} + +func CurrentUserSubUIDs() ([]user.SubID, error) { + return user.CurrentUserSubUIDs() +} + +func CurrentUserSubGIDs() ([]user.SubID, error) { + return user.CurrentUserSubGIDs() +} + +func CurrentProcessUIDMap() ([]user.IDMap, error) { + return user.CurrentProcessUIDMap() +} + +func CurrentProcessGIDMap() ([]user.IDMap, error) { + return user.CurrentProcessGIDMap() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go new file mode 100644 index 00000000..3c29f3d1 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go @@ -0,0 +1,146 @@ +// Package user is an alias for [github.com/moby/sys/user]. +// +// Deprecated: use [github.com/moby/sys/user]. +package user + +import ( + "io" + + "github.com/moby/sys/user" +) + +var ( + // ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd. + ErrNoPasswdEntries = user.ErrNoPasswdEntries + // ErrNoGroupEntries is returned if no matching entries were found in /etc/group. + ErrNoGroupEntries = user.ErrNoGroupEntries + // ErrRange is returned if a UID or GID is outside of the valid range. + ErrRange = user.ErrRange +) + +type ( + User = user.User + + Group = user.Group + + // SubID represents an entry in /etc/sub{u,g}id. + SubID = user.SubID + + // IDMap represents an entry in /proc/PID/{u,g}id_map.
+ IDMap = user.IDMap + + ExecUser = user.ExecUser +) + +func ParsePasswdFile(path string) ([]user.User, error) { + return user.ParsePasswdFile(path) +} + +func ParsePasswd(passwd io.Reader) ([]user.User, error) { + return user.ParsePasswd(passwd) +} + +func ParsePasswdFileFilter(path string, filter func(user.User) bool) ([]user.User, error) { + return user.ParsePasswdFileFilter(path, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(user.User) bool) ([]user.User, error) { + return user.ParsePasswdFilter(r, filter) +} + +func ParseGroupFile(path string) ([]user.Group, error) { + return user.ParseGroupFile(path) +} + +func ParseGroup(group io.Reader) ([]user.Group, error) { + return user.ParseGroup(group) +} + +func ParseGroupFileFilter(path string, filter func(user.Group) bool) ([]user.Group, error) { + return user.ParseGroupFileFilter(path, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(user.Group) bool) ([]user.Group, error) { + return user.ParseGroupFilter(r, filter) +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *user.ExecUser, passwdPath, groupPath string) (*user.ExecUser, error) { + return user.GetExecUserPath(userSpec, defaults, passwdPath, groupPath) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults are used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// - "" +// - "user" +// - "uid" +// - "user:group" +// - "uid:gid" +// - "user:gid" +// - "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *user.ExecUser, passwd, group io.Reader) (*user.ExecUser, error) { + return user.GetExecUser(userSpec, defaults, passwd, group) +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + return user.GetAdditionalGroups(additionalGroups, group) +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups.
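A minimal sketch of the GetExecUser contract documented above, run against fabricated passwd/group data (the entries are invented); the GetAdditionalGroupsPath wrapper itself continues right below:

package main

import (
    "fmt"
    "strings"

    "github.com/moby/sys/user"
)

func main() {
    passwd := strings.NewReader("alice:x:1000:1000::/home/alice:/bin/sh\n")
    group := strings.NewReader("staff:x:50:alice\n")

    // A "user:group" spec: the uid comes from passwd, the gid from group.
    execUser, err := user.GetExecUser("alice:staff", nil, passwd, group)
    if err != nil {
        panic(err)
    }
    fmt.Println(execUser.Uid, execUser.Gid, execUser.Home) // 1000 50 /home/alice
}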
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + return user.GetAdditionalGroupsPath(additionalGroups, groupPath) +} + +func ParseSubIDFile(path string) ([]user.SubID, error) { + return user.ParseSubIDFile(path) +} + +func ParseSubID(subid io.Reader) ([]user.SubID, error) { + return user.ParseSubID(subid) +} + +func ParseSubIDFileFilter(path string, filter func(user.SubID) bool) ([]user.SubID, error) { + return user.ParseSubIDFileFilter(path, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(user.SubID) bool) ([]user.SubID, error) { + return user.ParseSubIDFilter(r, filter) +} + +func ParseIDMapFile(path string) ([]user.IDMap, error) { + return user.ParseIDMapFile(path) +} + +func ParseIDMap(r io.Reader) ([]user.IDMap, error) { + return user.ParseIDMap(r) +} + +func ParseIDMapFileFilter(path string, filter func(user.IDMap) bool) ([]user.IDMap, error) { + return user.ParseIDMapFileFilter(path, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(user.IDMap) bool) ([]user.IDMap, error) { + return user.ParseIDMapFilter(r, filter) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns.go index f6cb98e5..b225f18f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns.go @@ -1,5 +1,4 @@ package userns // RunningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go var RunningInUserNS = runningInUserNS diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go index 1e00ab8b..bff03f8d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go @@ -3,14 +3,7 @@ package userns -import ( - "strings" - - "github.com/opencontainers/runc/libcontainer/user" -) - -func FuzzUIDMap(data []byte) int { - uidmap, _ := user.ParseIDMap(strings.NewReader(string(data))) - _ = uidMapInUserNS(uidmap) +func FuzzUIDMap(uidmap []byte) int { + _ = uidMapInUserNS(string(uidmap)) return 1 } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_linux.go index 724e6df0..a6710b32 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_linux.go @@ -1,9 +1,10 @@ package userns import ( + "bufio" + "fmt" + "os" "sync" - - "github.com/opencontainers/runc/libcontainer/user" ) var ( @@ -12,26 +13,43 @@ var ( ) // runningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go +// +// Originally copied from https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700. func runningInUserNS() bool { nsOnce.Do(func() { - uidmap, err := user.CurrentProcessUIDMap() + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported. 
+ return + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { - // This kernel-provided file only exists if user namespaces are supported return } - inUserNS = uidMapInUserNS(uidmap) + + inUserNS = uidMapInUserNS(string(l)) }) return inUserNS } -func uidMapInUserNS(uidmap []user.IDMap) bool { - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 { +func uidMapInUserNS(uidMap string) bool { + if uidMap == "" { + // File exists but is empty (the initial state when userns is created, + // see user_namespaces(7)). + return true + } + + var a, b, c int64 + if _, err := fmt.Sscanf(uidMap, "%d %d %d", &a, &b, &c); err != nil { + // Assume we are in a regular, non-user namespace. return false } - return true + + // As per user_namespaces(7), /proc/self/uid_map of + // the initial user namespace shows 0 0 4294967295. + initNS := a == 0 && b == 0 && c == 4294967295 + return !initNS } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go index f35c13a1..391c811c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go @@ -3,8 +3,6 @@ package userns -import "github.com/opencontainers/runc/libcontainer/user" - // runningInUserNS is a stub for non-Linux systems // Always returns false func runningInUserNS() bool { @@ -13,6 +11,6 @@ func runningInUserNS() bool { // uidMapInUserNS is a stub for non-Linux systems // Always returns false -func uidMapInUserNS(uidmap []user.IDMap) bool { +func uidMapInUserNS(uidMap string) bool { return false } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/usernsfd_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/usernsfd_linux.go new file mode 100644 index 00000000..2eb64cf7 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/usernsfd_linux.go @@ -0,0 +1,156 @@ +package userns + +import ( + "fmt" + "os" + "sort" + "strings" + "sync" + "syscall" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + + "github.com/opencontainers/runc/libcontainer/configs" +) + +type Mapping struct { + UIDMappings []configs.IDMap + GIDMappings []configs.IDMap +} + +func (m Mapping) toSys() (uids, gids []syscall.SysProcIDMap) { + for _, uid := range m.UIDMappings { + uids = append(uids, syscall.SysProcIDMap{ + ContainerID: int(uid.ContainerID), + HostID: int(uid.HostID), + Size: int(uid.Size), + }) + } + for _, gid := range m.GIDMappings { + gids = append(gids, syscall.SysProcIDMap{ + ContainerID: int(gid.ContainerID), + HostID: int(gid.HostID), + Size: int(gid.Size), + }) + } + return +} + +// id returns a unique identifier for this mapping, agnostic of the order of +// the uid and gid mappings (because the order doesn't matter to the kernel). +// The set of userns handles is indexed using this ID.
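Before leaving the uid_map parsing in userns_linux.go above: the detection rule is easy to get backwards, so here it is restated as a self-contained sketch. inInitUserNS is a hypothetical name of my own, not part of the vendored API; Mapping.id itself follows this note.

package main

import "fmt"

// inInitUserNS mirrors the unexported uidMapInUserNS above: only the exact
// mapping "0 0 4294967295" marks the initial user namespace, and an empty
// uid_map means a freshly created (i.e. non-initial) user namespace.
func inInitUserNS(uidMap string) bool {
    if uidMap == "" {
        return false // empty file: inside a just-created userns
    }
    var a, b, c int64
    if _, err := fmt.Sscanf(uidMap, "%d %d %d", &a, &b, &c); err != nil {
        return true // unparsable: assume the regular, initial namespace
    }
    return a == 0 && b == 0 && c == 4294967295
}

func main() {
    fmt.Println(inInitUserNS("0 0 4294967295")) // true
    fmt.Println(inInitUserNS("0 100000 65536")) // false
}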
+func (m Mapping) id() string { + var uids, gids []string + for _, idmap := range m.UIDMappings { + uids = append(uids, fmt.Sprintf("%d:%d:%d", idmap.ContainerID, idmap.HostID, idmap.Size)) + } + for _, idmap := range m.GIDMappings { + gids = append(gids, fmt.Sprintf("%d:%d:%d", idmap.ContainerID, idmap.HostID, idmap.Size)) + } + // We don't care about the sort order -- just sort them. + sort.Strings(uids) + sort.Strings(gids) + return "uid=" + strings.Join(uids, ",") + ";gid=" + strings.Join(gids, ",") +} + +type Handles struct { + m sync.Mutex + maps map[string]*os.File +} + +// Release all resources associated with this Handle. All existing files +// returned from Get() will continue to work even after calling Release(). The +// same Handles can be re-used after calling Release(). +func (hs *Handles) Release() { + hs.m.Lock() + defer hs.m.Unlock() + + // Close the files for good measure, though GC will do that for us anyway. + for _, file := range hs.maps { + _ = file.Close() + } + hs.maps = nil +} + +func spawnProc(req Mapping) (*os.Process, error) { + // We need to spawn a subprocess with the requested mappings, which is + // unfortunately quite expensive. The "safe" way of doing this is natively + // with Go (and then spawning something like "sleep infinity"), but + // execve() is a waste of cycles because we just need some process to have + // the right mapping, we don't care what it's executing. The "unsafe" + // option of doing a clone() behind the back of Go is probably okay in + // theory as long as we just do kill(getpid(), SIGSTOP). However, if we + // tell Go to put the new process into PTRACE_TRACEME mode, we can avoid + // the exec and not have to faff around with the mappings. + // + // Note that Go's stdlib does not support newuidmap, but in the case of + // id-mapped mounts, it seems incredibly unlikely that the user will be + // requesting us to do a remapping as an unprivileged user with mappings + // they have privileges over. + logrus.Debugf("spawning dummy process for id-mapping %s", req.id()) + uidMappings, gidMappings := req.toSys() + // We don't need to use /proc/thread-self here because the exe mm of a + // thread-group is guaranteed to be the same for all threads by definition. + // This lets us avoid having to do runtime.LockOSThread. + return os.StartProcess("/proc/self/exe", []string{"runc", "--help"}, &os.ProcAttr{ + Sys: &syscall.SysProcAttr{ + Cloneflags: unix.CLONE_NEWUSER, + UidMappings: uidMappings, + GidMappings: gidMappings, + GidMappingsEnableSetgroups: false, + // Put the process into PTRACE_TRACEME mode to allow us to get the + // userns without having a proper execve() target. + Ptrace: true, + }, + }) +} + +func dupFile(f *os.File) (*os.File, error) { + newFd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) + } + return os.NewFile(uintptr(newFd), f.Name()), nil +} + +// Get returns a handle to a /proc/$pid/ns/user nsfs file with the requested +// mapping. The processes spawned to produce userns nsfds are cached, so if +// equivalent user namespace mappings are requested, the same user namespace +// will be returned. The caller is responsible for closing the returned file +// descriptor. 
+func (hs *Handles) Get(req Mapping) (file *os.File, err error) {
+	hs.m.Lock()
+	defer hs.m.Unlock()
+
+	if hs.maps == nil {
+		hs.maps = make(map[string]*os.File)
+	}
+
+	file, ok := hs.maps[req.id()]
+	if !ok {
+		proc, err := spawnProc(req)
+		if err != nil {
+			return nil, fmt.Errorf("failed to spawn dummy process for map %s: %w", req.id(), err)
+		}
+		// Make sure we kill the helper process. We ignore errors because
+		// there's not much we can do about them anyway.
+		defer func() {
+			_ = proc.Kill()
+			_, _ = proc.Wait()
+		}()
+
+		// Stash away a handle to the userns file. This is neater than keeping
+		// the process alive, because Go's GC can handle files much better than
+		// leaked processes, and having long-living useless processes seems
+		// less than ideal.
+		file, err = os.Open(fmt.Sprintf("/proc/%d/ns/user", proc.Pid))
+		if err != nil {
+			return nil, err
+		}
+		hs.maps[req.id()] = file
+	}
+	// Duplicate the file, to make sure the lifecycle of each *os.File we
+	// return is independent.
+	return dupFile(file)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
index 7ef9da21..2edd1417 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
@@ -19,13 +19,14 @@ package utils
 import (
 	"fmt"
 	"os"
+	"runtime"
 
 	"golang.org/x/sys/unix"
 )
 
-// MaxSendfdLen is the maximum length of the name of a file descriptor being
-// sent using SendFd. The name of the file handle returned by RecvFd will never
-// be larger than this value.
+// MaxNameLen is the maximum length of the name of a file descriptor being sent
+// using SendFile. The name of the file handle returned by RecvFile will never be
+// larger than this value.
 const MaxNameLen = 4096
 
 // oobSpace is the size of the oob slice required to store a single FD. Note
@@ -33,26 +34,21 @@ const MaxNameLen = 4096
 // so sizeof(fd) = 4.
 var oobSpace = unix.CmsgSpace(4)
 
-// RecvFd waits for a file descriptor to be sent over the given AF_UNIX
+// RecvFile waits for a file descriptor to be sent over the given AF_UNIX
 // socket. The file name of the remote file descriptor will be recreated
 // locally (it is sent as non-auxiliary data in the same payload).
-func RecvFd(socket *os.File) (*os.File, error) {
-	// For some reason, unix.Recvmsg uses the length rather than the capacity
-	// when passing the msg_controllen and other attributes to recvmsg. So we
-	// have to actually set the length.
+func RecvFile(socket *os.File) (_ *os.File, Err error) {
 	name := make([]byte, MaxNameLen)
 	oob := make([]byte, oobSpace)
 
 	sockfd := socket.Fd()
-	n, oobn, _, _, err := unix.Recvmsg(int(sockfd), name, oob, 0)
+	n, oobn, _, _, err := unix.Recvmsg(int(sockfd), name, oob, unix.MSG_CMSG_CLOEXEC)
 	if err != nil {
 		return nil, err
 	}
-
 	if n >= MaxNameLen || oobn != oobSpace {
-		return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+		return nil, fmt.Errorf("recvfile: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
 	}
-	// Truncate.
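A usage sketch for the Handles cache introduced above (assuming the vendored runc import paths; the 100000/65536 mapping values are made up, and spawning the helper requires CLONE_NEWUSER privileges):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/userns"
)

func main() {
	var hs userns.Handles
	defer hs.Release()

	// Requesting an equivalent mapping twice returns handles to the same
	// cached user namespace; each returned *os.File is independently dup'd.
	nsFile, err := hs.Get(userns.Mapping{
		UIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		GIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	})
	if err != nil {
		fmt.Println("get userns handle:", err)
		return
	}
	defer nsFile.Close()
	fmt.Println("userns handle:", nsFile.Name())
}
```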
name = name[:n] oob = oob[:oobn] @@ -61,36 +57,63 @@ func RecvFd(socket *os.File) (*os.File, error) { if err != nil { return nil, err } - if len(scms) != 1 { - return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms)) + + // We cannot control how many SCM_RIGHTS we receive, and upon receiving + // them all of the descriptors are installed in our fd table, so we need to + // parse all of the SCM_RIGHTS we received in order to close all of the + // descriptors on error. + var fds []int + defer func() { + for i, fd := range fds { + if i == 0 && Err == nil { + // Only close the first one on error. + continue + } + // Always close extra ones. + _ = unix.Close(fd) + } + }() + var lastErr error + for _, scm := range scms { + if scm.Header.Type == unix.SCM_RIGHTS { + scmFds, err := unix.ParseUnixRights(&scm) + if err != nil { + lastErr = err + } else { + fds = append(fds, scmFds...) + } + } + } + if lastErr != nil { + return nil, lastErr } - scm := scms[0] - fds, err := unix.ParseUnixRights(&scm) - if err != nil { - return nil, err + // We do this after collecting the fds to make sure we close them all when + // returning an error here. + if len(scms) != 1 { + return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms)) } if len(fds) != 1 { return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds)) } - fd := uintptr(fds[0]) - - return os.NewFile(fd, string(name)), nil + return os.NewFile(uintptr(fds[0]), string(name)), nil } -// SendFd sends a file descriptor over the given AF_UNIX socket. In -// addition, the file.Name() of the given file will also be sent as -// non-auxiliary data in the same payload (allowing to send contextual -// information for a file descriptor). -func SendFd(socket *os.File, name string, fd uintptr) error { +// SendFile sends a file over the given AF_UNIX socket. file.Name() is also +// included so that if the other end uses RecvFile, the file will have the same +// name information. +func SendFile(socket *os.File, file *os.File) error { + name := file.Name() if len(name) >= MaxNameLen { return fmt.Errorf("sendfd: filename too long: %s", name) } - return SendFds(socket, []byte(name), int(fd)) + err := SendRawFd(socket, name, file.Fd()) + runtime.KeepAlive(file) + return err } -// SendFds sends a list of files descriptor and msg over the given AF_UNIX socket. -func SendFds(socket *os.File, msg []byte, fds ...int) error { - oob := unix.UnixRights(fds...) - return unix.Sendmsg(int(socket.Fd()), msg, oob, nil, 0) +// SendRawFd sends a specific file descriptor over the given AF_UNIX socket. +func SendRawFd(socket *os.File, msg string, fd uintptr) error { + oob := unix.UnixRights(int(fd)) + return unix.Sendmsg(int(socket.Fd()), []byte(msg), oob, nil, 0) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go index 6b9fc343..1b523d8a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go @@ -3,15 +3,12 @@ package utils import ( "encoding/binary" "encoding/json" - "fmt" "io" "os" "path/filepath" - "strconv" "strings" "unsafe" - securejoin "github.com/cyphar/filepath-securejoin" "golang.org/x/sys/unix" ) @@ -43,6 +40,9 @@ func ExitStatus(status unix.WaitStatus) int { } // WriteJSON writes the provided struct v to w using standard json marshaling +// without a trailing newline. 
This is used instead of json.Encoder because
+// there might be a problem in the json decoder in some cases, see:
+// https://github.com/docker/docker/issues/14203#issuecomment-174177790
 func WriteJSON(w io.Writer, v interface{}) error {
 	data, err := json.Marshal(v)
 	if err != nil {
@@ -99,52 +99,16 @@ func stripRoot(root, path string) string {
 	return CleanPath("/" + path)
 }
 
-// WithProcfd runs the passed closure with a procfd path (/proc/self/fd/...)
-// corresponding to the unsafePath resolved within the root. Before passing the
-// fd, this path is verified to have been inside the root -- so operating on it
-// through the passed fdpath should be safe. Do not access this path through
-// the original path strings, and do not attempt to use the pathname outside of
-// the passed closure (the file handle will be freed once the closure returns).
-func WithProcfd(root, unsafePath string, fn func(procfd string) error) error {
-	// Remove the root then forcefully resolve inside the root.
-	unsafePath = stripRoot(root, unsafePath)
-	path, err := securejoin.SecureJoin(root, unsafePath)
-	if err != nil {
-		return fmt.Errorf("resolving path inside rootfs failed: %w", err)
-	}
-
-	// Open the target path.
-	fh, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
-	if err != nil {
-		return fmt.Errorf("open o_path procfd: %w", err)
-	}
-	defer fh.Close()
-
-	// Double-check the path is the one we expected.
-	procfd := "/proc/self/fd/" + strconv.Itoa(int(fh.Fd()))
-	if realpath, err := os.Readlink(procfd); err != nil {
-		return fmt.Errorf("procfd verification failed: %w", err)
-	} else if realpath != path {
-		return fmt.Errorf("possibly malicious path detected -- refusing to operate on %s", realpath)
-	}
-
-	// Run the closure.
-	return fn(procfd)
-}
-
-// SearchLabels searches a list of key-value pairs for the provided key and
-// returns the corresponding value. The pairs must be separated with '='.
-func SearchLabels(labels []string, query string) string {
-	for _, l := range labels {
-		parts := strings.SplitN(l, "=", 2)
-		if len(parts) < 2 {
-			continue
-		}
-		if parts[0] == query {
-			return parts[1]
+// SearchLabels searches through a list of key=value pairs for a given key,
+// returning its value and a boolean flag telling whether the key exists.
+func SearchLabels(labels []string, key string) (string, bool) {
+	key += "="
+	for _, s := range labels {
+		if strings.HasPrefix(s, key) {
+			return s[len(key):], true
 		}
 	}
-	return ""
+	return "", false
 }
 
 // Annotations returns the bundle path and user defined annotations from the
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
index bf3237a2..f57f0874 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -5,10 +5,16 @@ package utils
 
 import (
 	"fmt"
+	"math"
 	"os"
+	"path/filepath"
+	"runtime"
 	"strconv"
+	"sync"
 
 	_ "unsafe" // for go:linkname
 
+	securejoin "github.com/cyphar/filepath-securejoin"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -24,12 +30,39 @@ func EnsureProcHandle(fh *os.File) error {
 	return nil
 }
 
+var (
+	haveCloseRangeCloexecBool bool
+	haveCloseRangeCloexecOnce sync.Once
+)
+
+func haveCloseRangeCloexec() bool {
+	haveCloseRangeCloexecOnce.Do(func() {
+		// Make sure we're not closing a random file descriptor.
+ tmpFd, err := unix.FcntlInt(0, unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return + } + defer unix.Close(tmpFd) + + err = unix.CloseRange(uint(tmpFd), uint(tmpFd), unix.CLOSE_RANGE_CLOEXEC) + // Any error means we cannot use close_range(CLOSE_RANGE_CLOEXEC). + // -ENOSYS and -EINVAL ultimately mean we don't have support, but any + // other potential error would imply that even the most basic close + // operation wouldn't work. + haveCloseRangeCloexecBool = err == nil + }) + return haveCloseRangeCloexecBool +} + type fdFunc func(fd int) // fdRangeFrom calls the passed fdFunc for each file descriptor that is open in // the current process. func fdRangeFrom(minFd int, fn fdFunc) error { - fdDir, err := os.Open("/proc/self/fd") + procSelfFd, closer := ProcThreadSelf("fd") + defer closer() + + fdDir, err := os.Open(procSelfFd) if err != nil { return err } @@ -67,6 +100,12 @@ func fdRangeFrom(minFd int, fn fdFunc) error { // CloseExecFrom sets the O_CLOEXEC flag on all file descriptors greater or // equal to minFd in the current process. func CloseExecFrom(minFd int) error { + // Use close_range(CLOSE_RANGE_CLOEXEC) if possible. + if haveCloseRangeCloexec() { + err := unix.CloseRange(uint(minFd), math.MaxUint, unix.CLOSE_RANGE_CLOEXEC) + return os.NewSyscallError("close_range", err) + } + // Otherwise, fall back to the standard loop. return fdRangeFrom(minFd, unix.CloseOnExec) } @@ -89,7 +128,8 @@ func runtime_IsPollDescriptor(fd uintptr) bool //nolint:revive // *os.File operations would apply to the wrong file). This function is only // intended to be called from the last stage of runc init. func UnsafeCloseFrom(minFd int) error { - // We must not close some file descriptors. + // We cannot use close_range(2) even if it is available, because we must + // not close some file descriptors. return fdRangeFrom(minFd, func(fd int) { if runtime_IsPollDescriptor(uintptr(fd)) { // These are the Go runtimes internal netpoll file descriptors. @@ -107,11 +147,117 @@ func UnsafeCloseFrom(minFd int) error { }) } -// NewSockPair returns a new unix socket pair -func NewSockPair(name string) (parent *os.File, child *os.File, err error) { +// NewSockPair returns a new SOCK_STREAM unix socket pair. +func NewSockPair(name string) (parent, child *os.File, err error) { fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0) if err != nil { return nil, nil, err } return os.NewFile(uintptr(fds[1]), name+"-p"), os.NewFile(uintptr(fds[0]), name+"-c"), nil } + +// WithProcfd runs the passed closure with a procfd path (/proc/self/fd/...) +// corresponding to the unsafePath resolved within the root. Before passing the +// fd, this path is verified to have been inside the root -- so operating on it +// through the passed fdpath should be safe. Do not access this path through +// the original path strings, and do not attempt to use the pathname outside of +// the passed closure (the file handle will be freed once the closure returns). +func WithProcfd(root, unsafePath string, fn func(procfd string) error) error { + // Remove the root then forcefully resolve inside the root. + unsafePath = stripRoot(root, unsafePath) + path, err := securejoin.SecureJoin(root, unsafePath) + if err != nil { + return fmt.Errorf("resolving path inside rootfs failed: %w", err) + } + + procSelfFd, closer := ProcThreadSelf("fd/") + defer closer() + + // Open the target path. 
+	fh, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return fmt.Errorf("open o_path procfd: %w", err)
+	}
+	defer fh.Close()
+
+	procfd := filepath.Join(procSelfFd, strconv.Itoa(int(fh.Fd())))
+	// Double-check the path is the one we expected.
+	if realpath, err := os.Readlink(procfd); err != nil {
+		return fmt.Errorf("procfd verification failed: %w", err)
+	} else if realpath != path {
+		return fmt.Errorf("possibly malicious path detected -- refusing to operate on %s", realpath)
+	}
+
+	return fn(procfd)
+}
+
+type ProcThreadSelfCloser func()
+
+var (
+	haveProcThreadSelf     bool
+	haveProcThreadSelfOnce sync.Once
+)
+
+// ProcThreadSelf returns a string that is equivalent to
+// /proc/thread-self/<subpath>, with a graceful fallback on older kernels where
+// /proc/thread-self doesn't exist. This method DOES NOT use SecureJoin,
+// meaning that the passed string needs to be trusted. The caller _must_ call
+// the returned procThreadSelfCloser function (which is runtime.UnlockOSThread)
+// *only once* after it has finished using the returned path string.
+func ProcThreadSelf(subpath string) (string, ProcThreadSelfCloser) {
+	haveProcThreadSelfOnce.Do(func() {
+		if _, err := os.Stat("/proc/thread-self/"); err == nil {
+			haveProcThreadSelf = true
+		} else {
+			logrus.Debugf("cannot stat /proc/thread-self (%v), falling back to /proc/self/task/", err)
+		}
+	})
+
+	// We need to lock our thread until the caller is done with the path string
+	// because any non-atomic operation on the path (such as opening a file,
+	// then reading it) could be interrupted by the Go runtime where the
+	// underlying thread is swapped out and the original thread is killed,
+	// resulting in pull-your-hair-out-hard-to-debug issues in the caller. In
+	// addition, the pre-3.17 fallback makes everything non-atomic because the
+	// same thing could happen between unix.Gettid() and the path operations.
+	//
+	// In theory, we don't need to lock in the atomic user case when using
+	// /proc/thread-self/, but it's better to be safe than sorry (and there are
+	// only one or two truly atomic users of /proc/thread-self/).
+	runtime.LockOSThread()
+
+	threadSelf := "/proc/thread-self/"
+	if !haveProcThreadSelf {
+		// Pre-3.17 kernels did not have /proc/thread-self, so do it manually.
+		threadSelf = "/proc/self/task/" + strconv.Itoa(unix.Gettid()) + "/"
+		if _, err := os.Stat(threadSelf); err != nil {
+			// Unfortunately, this code is called from rootfs_linux.go where we
+			// are running inside the pid namespace of the container but /proc
+			// is the host's procfs. Unfortunately there is no real way to get
+			// the correct tid to use here (the kernel age means we cannot do
+			// things like set up a private fsopen("proc") -- even scanning
+			// NSpid in all of the tasks in /proc/self/task/*/status requires
+			// Linux 4.1).
+			//
+			// So, we just have to assume that /proc/self is acceptable in this
+			// one specific case.
+			if os.Getpid() == 1 {
+				logrus.Debugf("/proc/thread-self (tid=%d) cannot be emulated inside the initial container setup -- using /proc/self instead: %v", unix.Gettid(), err)
+			} else {
+				// This should never happen, but the fallback should work in most cases...
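A short usage sketch for ProcThreadSelf as documented above (assuming the vendored runc utils package; the "status" subpath is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	// ProcThreadSelf pins the calling goroutine to its OS thread; the
	// returned closer (runtime.UnlockOSThread) must be called exactly once
	// after the path is no longer needed.
	statusPath, closer := utils.ProcThreadSelf("status")
	defer closer()

	data, err := os.ReadFile(statusPath)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("read %d bytes from %s\n", len(data), statusPath)
}
```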
+ logrus.Warnf("/proc/thread-self could not be emulated for pid=%d (tid=%d) -- using more buggy /proc/self fallback instead: %v", os.Getpid(), unix.Gettid(), err) + } + threadSelf = "/proc/self/" + } + } + return threadSelf + subpath, runtime.UnlockOSThread +} + +// ProcThreadSelfFd is small wrapper around ProcThreadSelf to make it easier to +// create a /proc/thread-self handle for given file descriptor. +// +// It is basically equivalent to ProcThreadSelf(fmt.Sprintf("fd/%d", fd)), but +// without using fmt.Sprintf to avoid unneeded overhead. +func ProcThreadSelfFd(fd uintptr) (string, ProcThreadSelfCloser) { + return ProcThreadSelf("fd/" + strconv.FormatUint(uint64(fd), 10)) +} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 4e7717d5..d1236ba7 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -187,6 +187,10 @@ type Hook struct { type Hooks struct { // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed. // It is called in the Runtime Namespace + // + // Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and + // [Hooks.StartContainer] instead, which allow more granular hook control + // during the create and start phase. Prestart []Hook `json:"prestart,omitempty"` // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called // It is called in the Runtime Namespace @@ -371,6 +375,12 @@ type LinuxMemory struct { // Total memory limit (memory + swap). Swap *int64 `json:"swap,omitempty"` // Kernel memory limit (in bytes). + // + // Deprecated: kernel-memory limits are not supported in cgroups v2, and + // were obsoleted in [kernel v5.4]. This field should no longer be used, + // as it may be ignored by runtimes. 
+ // + // [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0 Kernel *int64 `json:"kernel,omitempty"` // Kernel memory limit for tcp (in bytes) KernelTCP *int64 `json:"kernelTCP,omitempty"` diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index b3fca349..503971e0 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 + VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go index df97edcd..3e8f9ca6 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go @@ -323,7 +323,7 @@ func createEnvCacheMap(env []string) map[string]int { // // Deprecated: Replace with: // -// Use generator.Config = config +// Use generator.Config = config func (g *Generator) SetSpec(config *rspec.Spec) { g.Config = config } diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go index 25daf075..a845af51 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go @@ -23,8 +23,10 @@ type SyscallOpts struct { func ParseSyscallFlag(args SyscallOpts, config *rspec.LinuxSeccomp) error { var arguments []string if args.Index != "" && args.Value != "" && args.ValueTwo != "" && args.Operator != "" { - arguments = []string{args.Action, args.Syscall, args.Index, args.Value, - args.ValueTwo, args.Operator} + arguments = []string{ + args.Action, args.Syscall, args.Index, args.Value, + args.ValueTwo, args.Operator, + } } else { arguments = []string{args.Action, args.Syscall} } diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go index 345a32a6..12aa482c 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go @@ -32,7 +32,6 @@ func arches() []rspec.Arch { // DefaultProfile defines the whitelist for the default seccomp profile. func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp { - syscalls := []rspec.LinuxSyscall{ { Names: []string{ @@ -535,7 +534,6 @@ func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp { }, }, }...) 
- } arch := runtime.GOARCH diff --git a/vendor/github.com/openshift/imagebuilder/Makefile b/vendor/github.com/openshift/imagebuilder/Makefile index dea84bb2..6c545f4c 100644 --- a/vendor/github.com/openshift/imagebuilder/Makefile +++ b/vendor/github.com/openshift/imagebuilder/Makefile @@ -9,3 +9,9 @@ test: test-conformance: go test -v -tags conformance -timeout 45m ./dockerclient .PHONY: test-conformance + +.PHONY: vendor +vendor: + GO111MODULE=on go mod tidy + GO111MODULE=on go mod vendor + GO111MODULE=on go mod verify diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index b01ed0c0..74e118c3 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -13,6 +13,7 @@ import ( docker "github.com/fsouza/go-dockerclient" + buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/openshift/imagebuilder/dockerfile/command" "github.com/openshift/imagebuilder/dockerfile/parser" ) @@ -29,8 +30,20 @@ type Copy struct { Download bool // If set, the owner:group for the destination. This value is passed // to the executor for handling. - Chown string - Chmod string + Chown string + Chmod string + Checksum string + // Additional files which need to be created by executor for this + // instruction. + Files []File +} + +// File defines if any additional file needs to be created +// by the executor instruction so that specified command +// can execute/copy the created file inside the build container. +type File struct { + Name string // Name of the new file. + Data string // Content of the file. } // Run defines a run operation required in the container. @@ -41,6 +54,9 @@ type Run struct { Mounts []string // Network specifies the network mode to run the container with Network string + // Additional files which need to be created by executor for this + // instruction. + Files []File } type Executor interface { @@ -78,7 +94,7 @@ func (logExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) e func (logExecutor) Copy(excludes []string, copies ...Copy) error { for _, c := range copies { - log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s, chmod %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod) + log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s, chmod %s, checksum: %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod, c.Checksum) } return nil } @@ -394,7 +410,7 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error { if !ok { return exec.UnrecognizedInstruction(step) } - if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original); err != nil { + if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original, step.Heredocs); err != nil { return err } @@ -574,7 +590,7 @@ func SplitBy(node *parser.Node, value string) []*parser.Node { } // StepFunc is invoked with the result of a resolved step. 
-type StepFunc func(*Builder, []string, map[string]bool, []string, string) error +type StepFunc func(*Builder, []string, map[string]bool, []string, string, []buildkitparser.Heredoc) error var evaluateTable = map[string]StepFunc{ command.Env: env, diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index f264876c..f43adacb 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -22,6 +22,9 @@ import ( "github.com/containers/storage/pkg/regexp" "github.com/openshift/imagebuilder/signal" "github.com/openshift/imagebuilder/strslice" + + buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser" + buildkitshell "github.com/moby/buildkit/frontend/dockerfile/shell" ) var ( @@ -53,7 +56,7 @@ func init() { // // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. -func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) == 0 { return errAtLeastOneArgument("ENV") } @@ -94,7 +97,7 @@ func env(b *Builder, args []string, attributes map[string]bool, flagArgs []strin // MAINTAINER some text // // Sets the maintainer metadata. -func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) != 1 { return errExactlyOneArgument("MAINTAINER") } @@ -105,7 +108,7 @@ func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs // LABEL some json data describing the image // // Sets the Label variable foo to bar, -func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) == 0 { return errAtLeastOneArgument("LABEL") } @@ -127,16 +130,43 @@ func label(b *Builder, args []string, attributes map[string]bool, flagArgs []str return nil } +func processHereDocs(originalInstruction string, heredocs []buildkitparser.Heredoc, args []string) ([]File, error) { + var files []File + for _, heredoc := range heredocs { + var err error + content := heredoc.Content + if heredoc.Chomp { + content = buildkitparser.ChompHeredocContent(content) + } + if heredoc.Expand { + shlex := buildkitshell.NewLex('\\') + shlex.RawQuotes = true + shlex.RawEscapes = true + content, err = shlex.ProcessWord(content, args) + if err != nil { + return nil, err + } + } + file := File{ + Data: content, + Name: heredoc.Name, + } + files = append(files, file) + } + return files, nil +} + // ADD foo /path // // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. 
-func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) < 2 { return errAtLeastTwoArgument("ADD") } var chown string var chmod string + var checksum string last := len(args) - 1 dest := makeAbsolute(args[last], b.RunConfig.WorkingDir) filteredUserArgs := make(map[string]string) @@ -154,24 +184,43 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin switch { case strings.HasPrefix(arg, "--chown="): chown = strings.TrimPrefix(arg, "--chown=") + if chown == "" { + return fmt.Errorf("no value specified for --chown=") + } case strings.HasPrefix(arg, "--chmod="): chmod = strings.TrimPrefix(arg, "--chmod=") err = checkChmodConversion(chmod) if err != nil { return err } + case strings.HasPrefix(arg, "--checksum="): + checksum = strings.TrimPrefix(arg, "--checksum=") + if checksum == "" { + return fmt.Errorf("no value specified for --checksum=") + } default: - return fmt.Errorf("ADD only supports the --chmod= and the --chown= flag") + return fmt.Errorf("ADD only supports the --chmod=, --chown=, and --checksum= flags") } } - b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true, Chown: chown, Chmod: chmod}) + files, err := processHereDocs(original, heredocs, userArgs) + if err != nil { + return err + } + b.PendingCopies = append(b.PendingCopies, Copy{ + Src: args[0:last], + Dest: dest, + Download: true, + Chown: chown, + Chmod: chmod, + Checksum: checksum, + Files: files}) return nil } // COPY foo /path // // Same as 'ADD' but without the tar and remote url handling. -func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) < 2 { return errAtLeastTwoArgument("COPY") } @@ -189,6 +238,9 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArg switch { case strings.HasPrefix(arg, "--chown="): chown = strings.TrimPrefix(arg, "--chown=") + if chown == "" { + return fmt.Errorf("no value specified for --chown=") + } case strings.HasPrefix(arg, "--chmod="): chmod = strings.TrimPrefix(arg, "--chmod=") err = checkChmodConversion(chmod) @@ -197,18 +249,25 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArg } case strings.HasPrefix(arg, "--from="): from = strings.TrimPrefix(arg, "--from=") + if from == "" { + return fmt.Errorf("no value specified for --from=") + } default: return fmt.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") } } - b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false, Chown: chown, Chmod: chmod}) + files, err := processHereDocs(original, heredocs, userArgs) + if err != nil { + return err + } + b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false, Chown: chown, Chmod: chmod, Files: files}) return nil } // FROM imagename // // This sets the image the dockerfile will build on top of. 
-func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { switch { case len(args) == 1: case len(args) == 3 && len(args[0]) > 0 && strings.EqualFold(args[1], "as") && len(args[2]) > 0: @@ -255,6 +314,9 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri switch { case strings.HasPrefix(arg, "--platform="): platformString := strings.TrimPrefix(arg, "--platform=") + if platformString == "" { + return fmt.Errorf("no value specified for --platform=") + } b.Platform = platformString default: return fmt.Errorf("FROM only supports the --platform flag") @@ -273,7 +335,7 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri // evaluator.go and comments around dispatch() in the same file explain the // special cases. search for 'OnBuild' in internals.go for additional special // cases. -func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) == 0 { return errAtLeastOneArgument("ONBUILD") } @@ -295,7 +357,7 @@ func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []s // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. -func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) != 1 { return errExactlyOneArgument("WORKDIR") } @@ -322,7 +384,7 @@ func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []s // RUN echo hi # sh -c echo hi (Linux) // RUN echo hi # cmd /S /C echo hi (Windows) // RUN [ "echo", "hi" ] # echo hi -func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if b.RunConfig.Image == "" { return fmt.Errorf("Please provide a source image with `from` prior to run") } @@ -346,18 +408,30 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin switch { case strings.HasPrefix(arg, "--mount="): mount := strings.TrimPrefix(arg, "--mount=") + if mount == "" { + return fmt.Errorf("no value specified for --mount=") + } mounts = append(mounts, mount) case strings.HasPrefix(arg, "--network="): network = strings.TrimPrefix(arg, "--network=") + if network == "" { + return fmt.Errorf("no value specified for --network=") + } default: return fmt.Errorf("RUN only supports the --mount and --network flag") } } + files, err := processHereDocs(original, heredocs, userArgs) + if err != nil { + return err + } + run := Run{ Args: args, Mounts: mounts, Network: network, + Files: files, } if !attributes["json"] { @@ -371,7 +445,7 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin // // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. 
-func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { cmdSlice := handleJSONArgs(args, attributes) if !attributes["json"] { @@ -396,7 +470,7 @@ func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []strin // // Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint // is initialized at NewBuilder time instead of through argument parsing. -func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { parsed := handleJSONArgs(args, attributes) switch { @@ -427,7 +501,7 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs // // Expose ports for links and port mappings. This all ends up in // b.RunConfig.ExposedPorts for runconfig. -func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) == 0 { return errAtLeastOneArgument("EXPOSE") } @@ -454,7 +528,7 @@ func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []st // // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. -func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) != 1 { return errExactlyOneArgument("USER") } @@ -466,7 +540,7 @@ func user(b *Builder, args []string, attributes map[string]bool, flagArgs []stri // VOLUME /foo // // Expose the volume /foo for use. Will also accept the JSON array form. -func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) == 0 { return errAtLeastOneArgument("VOLUME") } @@ -488,7 +562,7 @@ func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []st // STOPSIGNAL signal // // Set the signal that will be used to kill the container. -func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { +func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error { if len(args) != 1 { return errExactlyOneArgument("STOPSIGNAL") } @@ -506,7 +580,7 @@ func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs // // Set the default healthcheck command to run in the container (which may be empty). // Argument handling is the same as RUN. 
-func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
 	if len(args) == 0 {
 		return errAtLeastOneArgument("HEALTHCHECK")
 	}
@@ -599,7 +673,7 @@ var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"}
 // Adds the variable foo to the trusted list of variables that can be passed
 // to the builder using the --build-arg flag for expansion/substitution or passing to 'run'.
 // Dockerfile author may optionally set a default value of this variable.
-func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
 	var (
 		name  string
 		value string
@@ -665,7 +739,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
 // SHELL powershell -command
 //
 // Set the non-default shell to use.
-func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
 	shellSlice := handleJSONArgs(args, attributes)
 	switch {
 	case len(shellSlice) == 0:
diff --git a/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go b/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go
index f5bef441..f8cb979a 100644
--- a/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go
+++ b/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go
@@ -15,6 +15,8 @@ import (
 	sRegexp "github.com/containers/storage/pkg/regexp"
 	"github.com/containers/storage/pkg/system"
+	buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
+	buildkitshell "github.com/moby/buildkit/frontend/dockerfile/shell"
 	"github.com/openshift/imagebuilder/dockerfile/command"
 )
@@ -30,14 +32,15 @@ import (
 // but lucky for us the Dockerfile isn't very complicated. This structure
 // works a little more effectively than a "proper" parse tree for our needs.
 type Node struct {
-	Value      string          // actual content
-	Next       *Node           // the next item in the current sexp
-	Children   []*Node         // the children of this sexp
-	Attributes map[string]bool // special attributes for this node
-	Original   string          // original line used before parsing
-	Flags      []string        // only top Node should have this set
-	StartLine  int             // the line in the original dockerfile where the node begins
-	EndLine    int             // the line in the original dockerfile where the node ends
+	Value      string                   // actual content
+	Next       *Node                    // the next item in the current sexp
+	Children   []*Node                  // the children of this sexp
+	Heredocs   []buildkitparser.Heredoc // extra heredoc content attachments
+	Attributes map[string]bool          // special attributes for this node
+	Original   string                   // original line used before parsing
+	Flags      []string                 // only top Node should have this set
+	StartLine  int                      // the line in the original dockerfile where the node begins
+	EndLine    int                      // the line in the original dockerfile where the node ends
 }
 
 // Dump dumps the AST defined by `node` as a list of sexps.
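The new Heredocs attachment can be seen end-to-end with a small parse. A sketch assuming the vendored parser package, and assuming Result exposes the root node as AST as in the upstream moby parser this file derives from:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder/dockerfile/parser"
)

func main() {
	containerfile := `FROM busybox
RUN <<EOF
echo hello
echo world
EOF
`
	res, err := parser.Parse(strings.NewReader(containerfile))
	if err != nil {
		panic(err)
	}
	for _, node := range res.AST.Children {
		// Only ADD, COPY, RUN (and ONBUILD wrappers) may carry heredocs.
		fmt.Printf("%s: %d heredoc(s)\n", node.Value, len(node.Heredocs))
	}
}
```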
@@ -53,7 +56,11 @@ func (node *Node) Dump() string { for _, n := range node.Children { str += "(" + n.Dump() + ")\n" } - + if len(node.Heredocs) > 0 { + for _, doc := range node.Heredocs { + str += "(" + doc.Name + "-" + doc.Content + "-" + strconv.Itoa(int(doc.FileDescriptor)) + "-" + strconv.FormatBool(doc.Expand) + "-" + strconv.FormatBool(doc.Chomp) + ")\n" + } + } for n := node.Next; n != nil; n = n.Next { if len(n.Children) > 0 { str += " " + n.Dump() @@ -70,6 +77,24 @@ func (node *Node) lines(start, end int) { node.EndLine = end } +func (node *Node) canContainHeredoc() bool { + // check for compound commands, like ONBUILD + if ok := heredocCompoundDirectives[strings.ToLower(node.Value)]; ok { + if node.Next != nil && len(node.Next.Children) > 0 { + node = node.Next.Children[0] + } + } + + if ok := heredocDirectives[strings.ToLower(node.Value)]; !ok { + return false + } + if isJSON := node.Attributes["json"]; isJSON { + return false + } + + return true +} + // AddChild adds a new child node, and updates line information func (node *Node) AddChild(child *Node, startLine, endLine int) { child.lines(startLine, endLine) @@ -94,6 +119,20 @@ const DefaultEscapeToken = '\\' // defaultPlatformToken is the platform assumed for the build if not explicitly provided var defaultPlatformToken = runtime.GOOS +var ( + // Directives allowed to contain heredocs + heredocDirectives = map[string]bool{ + command.Add: true, + command.Copy: true, + command.Run: true, + } + + // Directives allowed to contain directives containing heredocs + heredocCompoundDirectives = map[string]bool{ + command.Onbuild: true, + } +) + // Directive is the structure used during a build run to hold the state of // parsing directives. type Directive struct { @@ -261,6 +300,10 @@ func Parse(rwc io.Reader) (*Result, error) { currentLine := 0 root := &Node{StartLine: -1} scanner := bufio.NewScanner(rwc) + buf := []byte{} + // containerfile may contain large lines, + // allocate 2MB for such use-cases. 
+ scanner.Buffer(buf, 2048*1024) warnings := []string{} var err error @@ -309,9 +352,46 @@ func Parse(rwc io.Reader) (*Result, error) { if err != nil { return nil, err } + + if child.canContainHeredoc() { + heredocs, err := heredocsFromLine(line) + if err != nil { + return nil, err + } + + for _, heredoc := range heredocs { + terminator := []byte(heredoc.Name) + terminated := false + for scanner.Scan() { + bytesRead := scanner.Bytes() + currentLine++ + + possibleTerminator := trimNewline(bytesRead) + if heredoc.Chomp { + possibleTerminator = trimLeadingTabs(possibleTerminator) + } + if bytes.Equal(possibleTerminator, terminator) { + terminated = true + break + } + heredoc.Content += "\n" + heredoc.Content += string(bytesRead) + } + if !terminated { + return nil, fmt.Errorf("%s: unterminated heredoc", heredoc.Name) + } + + child.Heredocs = append(child.Heredocs, heredoc) + } + } + root.AddChild(child, startLine, currentLine) } + if scannerErr := scanner.Err(); scannerErr != nil { + return nil, scannerErr + } + if len(warnings) > 0 { warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.") } @@ -323,6 +403,26 @@ func Parse(rwc io.Reader) (*Result, error) { }, nil } +func heredocsFromLine(line string) ([]buildkitparser.Heredoc, error) { + shlex := buildkitshell.NewLex('\\') + shlex.RawQuotes = true + shlex.RawEscapes = true + shlex.SkipUnsetEnv = true + words, _ := shlex.ProcessWords(line, []string{}) + + var docs []buildkitparser.Heredoc + for _, word := range words { + heredoc, err := buildkitparser.ParseHeredoc(word) + if err != nil { + return nil, err + } + if heredoc != nil { + docs = append(docs, *heredoc) + } + } + return docs, nil +} + func trimComments(src []byte) []byte { return tokenComment.ReplaceAll(src, []byte{}) } @@ -331,6 +431,16 @@ func trimWhitespace(src []byte) []byte { return bytes.TrimLeftFunc(src, unicode.IsSpace) } +func trimLeadingWhitespace(src []byte) []byte { + return bytes.TrimLeftFunc(src, unicode.IsSpace) +} +func trimLeadingTabs(src []byte) []byte { + return bytes.TrimLeft(src, "\t") +} +func trimNewline(src []byte) []byte { + return bytes.TrimRight(src, "\r\n") +} + func isEmptyContinuationLine(line []byte) bool { return len(trimComments(trimWhitespace(line))) == 0 } diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go index b05f6c64..7bea4f48 100644 --- a/vendor/github.com/openshift/imagebuilder/evaluator.go +++ b/vendor/github.com/openshift/imagebuilder/evaluator.go @@ -7,6 +7,7 @@ import ( "github.com/openshift/imagebuilder/dockerfile/command" "github.com/openshift/imagebuilder/dockerfile/parser" + buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser" ) // ParseDockerfile parses the provided stream as a canonical Dockerfile @@ -34,14 +35,10 @@ var replaceEnvAllowed = map[string]bool{ // Certain commands are allowed to have their args split into more // words after env var replacements. Meaning: -// -// ENV foo="123 456" -// EXPOSE $foo -// +// ENV foo="123 456" +// EXPOSE $foo // should result in the same thing as: -// -// EXPOSE 123 456 -// +// EXPOSE 123 456 // and not treat "123 456" as a single word. // Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. // Quotes will cause it to still be treated as single word. 
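At the evaluator level, the same heredocs surface on Step (the Step struct gains a Heredocs field in the next hunk). A sketch assuming imagebuilder's public ParseDockerfile/NewBuilder/Step/Resolve API, with NewBuilder accepting a nil build-arg map:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	root, err := imagebuilder.ParseDockerfile(strings.NewReader(`FROM busybox
RUN <<EOF
echo hello
EOF
`))
	if err != nil {
		panic(err)
	}
	b := imagebuilder.NewBuilder(nil) // nil build-arg map
	for _, node := range root.Children {
		step := b.Step()
		if err := step.Resolve(node); err != nil {
			panic(err)
		}
		// The RUN step now reports its attached heredoc ("EOF").
		fmt.Printf("%s: %d heredoc(s)\n", step.Command, len(step.Heredocs))
	}
}
```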
@@ -59,6 +56,7 @@ type Step struct { Flags []string Attrs map[string]bool Message string + Heredocs []buildkitparser.Heredoc Original string } @@ -78,6 +76,7 @@ type Step struct { // deal with that, at least until it becomes more of a general concern with new // features. func (b *Step) Resolve(ast *parser.Node) error { + b.Heredocs = ast.Heredocs cmd := ast.Value upperCasedCmd := strings.ToUpper(cmd) diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec index 8f26e33a..194707c5 100644 --- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec +++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec @@ -12,7 +12,7 @@ # %global golang_version 1.8.1 -%{!?version: %global version 1.2.5} +%{!?version: %global version 1.2.6-dev} %{!?release: %global release 1} %global package_name imagebuilder %global product_name Container Image Builder diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go index 2263abd7..93dce871 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -39,8 +39,7 @@ import ( type CoseV001Schema struct { // data - // Required: true - Data *CoseV001SchemaData `json:"data"` + Data *CoseV001SchemaData `json:"data,omitempty"` // The COSE Sign1 Message // Format: byte @@ -71,9 +70,8 @@ func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { } func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err + if swag.IsZero(m.Data) { // not required + return nil } if m.Data != nil { @@ -117,6 +115,10 @@ func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt if m.Data != nil { + if swag.IsZero(m.Data) { // not required + return nil + } + if err := m.Data.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data") diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2578d94b..a618ec24 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -19,7 +19,7 @@ linters: disable-all: true enable: #- bodyclose - - deadcode + # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -51,12 +51,12 @@ linters: #- rowserrcheck #- scopelint #- staticcheck - - structcheck + #- structcheck ! deprecated since v1.49.0; replaced by 'unused' #- stylecheck #- typecheck - unconvert #- unparam - #- unused - - varcheck + - unused + # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace fast: false diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 592c0b8a..6444f4b7 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -4,7 +4,7 @@ Cobra is a library for creating powerful modern CLI applications. Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to -name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. +name a few. 
[This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
 
 [![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
 [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
@@ -80,7 +80,7 @@ which maintains the same interface while adding POSIX compliance.
 # Installing
 Using Cobra is easy. First, use `go get` to install the latest version
-of the library. 
+of the library.
 
 ```
 go get -u github.com/spf13/cobra@latest
@@ -105,8 +105,8 @@ go install github.com/spf13/cobra-cli@latest
 For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
 
-For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md).
+For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md).
 
 # License
 
-Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
index 2d023943..5f965e05 100644
--- a/vendor/github.com/spf13/cobra/active_help.go
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -17,6 +17,7 @@ package cobra
 import (
 	"fmt"
 	"os"
+	"regexp"
 	"strings"
 )
@@ -29,6 +30,8 @@ const (
 	activeHelpGlobalDisable = "0"
 )
 
+var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
+
 // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
 // Such strings will be processed by the completion script and will be shown as ActiveHelp
 // to the user.
@@ -42,7 +45,7 @@ func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
 
 // GetActiveHelpConfig returns the value of the ActiveHelp environment variable
 // <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
-// case, with all - replaced by _.
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
 // It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
 // is set to "0".
 func GetActiveHelpConfig(cmd *Command) string {
@@ -55,9 +58,10 @@ func GetActiveHelpConfig(cmd *Command) string {
 
 // activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
 // variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
-// root command in upper case, with all - replaced by _.
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
 func activeHelpEnvVar(name string) string {
 	// This format should not be changed: users will be using it explicitly.
activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - return strings.ReplaceAll(activeHelpEnvVar, "-", "_") + activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") + return activeHelpEnvVar } diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md deleted file mode 100644 index 5e7f59af..00000000 --- a/vendor/github.com/spf13/cobra/active_help.md +++ /dev/null @@ -1,157 +0,0 @@ -# Active Help - -Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion. - -For example, -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding. - -bash-5.1$ bin/helm package [tab] -Please specify the path to the chart to package - -bash-5.1$ bin/helm package [tab][tab] -bin/ internal/ scripts/ pkg/ testdata/ -``` - -**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program. -## Supported shells - -Active Help is currently only supported for the following shells: -- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed. -- Zsh - -## Adding Active Help messages - -As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md). - -Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details. - -### Active Help for nouns - -Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example: - -```go -cmd := &cobra.Command{ - Use: "add [NAME] [URL]", - Short: "add a chart repository", - Args: require.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return addRepo(args) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var comps []string - if len(args) == 0 { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } else if len(args) == 1 { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } else { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - return comps, cobra.ShellCompDirectiveNoFileComp - }, -} -``` -The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. 
It yields the following behavior: -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding - -bash-5.1$ helm repo add grafana [tab] -You must specify the URL for the repo you are adding - -bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab] -This command does not take any more arguments -``` -**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions. - -### Active Help for flags - -Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example: -```go -_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 2 { - return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp - } - return compVersionFlag(args[1], toComplete) - }) -``` -The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag. -``` -bash-5.1$ bin/helm install myrelease --version 2.0.[tab] -You must first specify the chart to install before the --version flag can be completed - -bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab] -2.0.1 2.0.2 2.0.3 -``` - -## User control of Active Help - -You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any. -Configuring Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration. - -The way to configure Active Help is to use the program's Active Help environment -variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your -program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever -Active Help configuration values are supported by the program. - -For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user -would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell. - -For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the -Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages -should or should not be added (instead of reading the environment variable directly).
- -For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - activeHelpLevel := cobra.GetActiveHelpConfig(cmd) - - var comps []string - if len(args) == 0 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } - } else if len(args) == 1 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } - } else { - if activeHelpLevel == "local" { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - } - return comps, cobra.ShellCompDirectiveNoFileComp -}, -``` -**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly. - -**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to. - -**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string. -## Active Help with Cobra's default completion command - -Cobra provides a default `completion` command for programs that wish to use it. -When using the default `completion` command, Active Help is configurable in the same -fashion as described above using environment variables. You may wish to document this in more -detail for your users. - -## Debugging Active Help - -Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details. - -When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where any `-` is replaced by an `_`. For example, we can test deactivating some Active Help as shown below: -``` -$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -_activeHelp_ WARNING: cannot re-use a name that is still in use -:0 -Completion ended with directive: ShellCompDirectiveDefault - -$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -:0 -Completion ended with directive: ShellCompDirectiveDefault -``` diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 10c78847..8a531518 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -85,7 +85,7 @@ __%[1]s_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program.
- # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") # Disable ActiveHelp which is not supported for bash completion v1 requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 52919b2f..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,93 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used alongside `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. - -**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in Kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of Kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to Kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
- -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 19b09560..1cce5c32 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -57,7 +57,7 @@ __%[1]s_get_completion_results() { local requestComp lastParam lastChar args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") requestComp="${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index b07b44a0..a6b160ce 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -43,12 +43,13 @@ var initializers []func() var finalizers []func() const ( - defaultPrefixMatching = false - defaultCommandSorting = true - defaultCaseInsensitive = false + defaultPrefixMatching = false + defaultCommandSorting = true + defaultCaseInsensitive = false + defaultTraverseRunHooks = false ) -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing +// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing // to automatically enable in CLI tools. // Set this to true to enable it. var EnablePrefixMatching = defaultPrefixMatching @@ -60,6 +61,10 @@ var EnableCommandSorting = defaultCommandSorting // EnableCaseInsensitive allows case-insensitive command names. (case sensitive by default) var EnableCaseInsensitive = defaultCaseInsensitive +// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. +// By default this is disabled, which means only the first run hook found is executed. +var EnableTraverseRunHooks = defaultTraverseRunHooks + // MousetrapHelpText enables an information splash screen on Windows // if the CLI is started from explorer.exe. // To disable the mousetrap, just set this variable to blank string ("").
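The `EnableTraverseRunHooks` option added above changes how persistent pre-run and post-run hooks chain across a command tree. A minimal sketch of opting in (the command names here are illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Opt in: execute the persistent pre-run of every ancestor, root first,
	// instead of only the closest one that is defined.
	cobra.EnableTraverseRunHooks = true

	root := &cobra.Command{
		Use: "app",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("root pre-run")
		},
	}
	sub := &cobra.Command{
		Use: "sub",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("sub pre-run")
		},
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("sub run")
		},
	}
	root.AddCommand(sub)

	root.SetArgs([]string{"sub"})
	_ = root.Execute()
	// With traversal enabled this prints "root pre-run", then "sub pre-run",
	// then "sub run"; with the default of false, only the sub-command's own
	// persistent pre-run would execute before "sub run".
}
```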
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 01f7c6f1..2fbe6c13 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -30,7 +30,10 @@ import ( flag "github.com/spf13/pflag" ) -const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" +const ( + FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" +) // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist @@ -99,7 +102,7 @@ type Command struct { Deprecated string // Annotations are key/value pairs that can be used by applications to identify or - // group commands. + // group commands or set special options. Annotations map[string]string // Version defines the version for this command. If this value is non-empty and the command does not @@ -115,6 +118,8 @@ type Command struct { // * PostRun() // * PersistentPostRun() // All functions get the same args, the arguments after the command name. + // The *PreRun and *PostRun functions will only be executed if the Run function of the current + // command has been declared. // // PersistentPreRun: children of this command will inherit and execute. PersistentPreRun func(cmd *Command, args []string) @@ -181,6 +186,9 @@ type Command struct { // versionTemplate is the version template defined by user. versionTemplate string + // errPrefix is the error message prefix defined by user. + errPrefix string + // inReader is a reader defined by the user that replaces stdin inReader io.Reader // outWriter is a writer defined by the user that replaces stdout @@ -346,6 +354,11 @@ func (c *Command) SetVersionTemplate(s string) { c.versionTemplate = s } +// SetErrPrefix sets the error message prefix to be used. Applications can use it to set a custom prefix. +func (c *Command) SetErrPrefix(s string) { + c.errPrefix = s +} + // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. // The user should not have a cyclic dependency on commands. func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { @@ -595,6 +608,18 @@ func (c *Command) VersionTemplate() string { ` } +// ErrPrefix returns the error message prefix for the command. +func (c *Command) ErrPrefix() string { + if c.errPrefix != "" { + return c.errPrefix + } + + if c.HasParent() { + return c.parent.ErrPrefix() + } + return "Error:" +} + func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { flag := fs.Lookup(name) if flag == nil { @@ -752,7 +777,9 @@ func (c *Command) findNext(next string) *Command { } if len(matches) == 1 { - return matches[0] + // Temporarily disable gosec G602, which produces a false positive. + // See https://github.com/securego/gosec/issues/1005. + return matches[0] // #nosec G602 } return nil @@ -910,15 +937,31 @@ func (c *Command) execute(a []string) (err error) { return err } + parents := make([]*Command, 0, 5) for p := c; p != nil; p = p.Parent() { + if EnableTraverseRunHooks { + // When EnableTraverseRunHooks is set: + // - Execute all persistent pre-runs from the root parent till this command. + // - Execute all persistent post-runs from this command till the root parent. + parents = append([]*Command{p}, parents...) + } else { + // Otherwise, execute only the first found persistent hook.
+ parents = append(parents, p) + } + } + for _, p := range parents { if p.PersistentPreRunE != nil { if err := p.PersistentPreRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } if c.PreRunE != nil { @@ -955,10 +998,14 @@ func (c *Command) execute(a []string) (err error) { if err := p.PersistentPostRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } @@ -1048,7 +1095,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(c.ErrPrefix(), err.Error()) c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err @@ -1077,7 +1124,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(cmd.ErrPrefix(), err.Error()) } // If root command has SilenceUsage flagged, @@ -1380,6 +1427,9 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { + return displayName + } return c.Name() } @@ -1402,6 +1452,7 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. +// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index ee38c4d0..b60f6b20 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -145,6 +145,20 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman return nil } +// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. +func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { + flag := c.Flag(flagName) + if flag == nil { + return nil, false + } + + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + + completionFunc, exists := flagCompletionFunctions[flag] + return completionFunc, exists +} + // Returns a string listing the different directive enabled in the specified parameter func (d ShellCompDirective) string() string { var directives []string @@ -283,9 +297,13 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // These flags are normally added when `execute()` is called on `finalCmd`, // however, when doing completion, we don't call `finalCmd.execute()`. - // Let's add the --help and --version flag ourselves. - finalCmd.InitDefaultHelpFlag() - finalCmd.InitDefaultVersionFlag() + // Let's add the --help and --version flag ourselves but only if the finalCmd + // has not disabled flag parsing; if flag parsing is disabled, it is up to the + // finalCmd itself to handle the completion of *all* flags. 
+ if !finalCmd.DisableFlagParsing { + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + } // Check if we are doing flag value completion before parsing the flags. // This is important because if we are completing a flag value, we need to also @@ -389,6 +407,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) + // Try to complete non-inherited flags even if DisableFlagParsing==true. + // This allows programs to tell Cobra about flags for completion even + // if the actual parsing of flags is not done by Cobra. + // For instance, Helm uses this to provide flag name completion for + // some of its plugins. finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 12ca0d2b..12d61b69 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -113,7 +113,7 @@ function __%[1]s_clear_perform_completion_once_result __%[1]s_debug "" __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" set --erase __%[1]s_perform_completion_once_result - __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" end function __%[1]s_requires_order_preservation diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed12..00000000 --- a/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index b35fde15..0671ec5f 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -24,6 +24,7 @@ import ( const ( requiredAsGroup = "cobra_annotation_required_if_others_set" + oneRequired = "cobra_annotation_one_required" mutuallyExclusive = "cobra_annotation_mutually_exclusive" ) @@ -43,6 +44,22 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { } } +// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors +// if the command is invoked without at least one flag from the given set of flags. +func (c *Command) MarkFlagsOneRequired(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) + } + if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + // MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors // if the command is invoked with more than one flag from the given set of flags. 
func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { @@ -59,7 +76,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { } } -// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the +// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the // first error encountered. func (c *Command) ValidateFlagGroups() error { if c.DisableFlagParsing { @@ -71,15 +88,20 @@ func (c *Command) ValidateFlagGroups() error { // groupStatus format is the list of flags as a unique ID, // then a map of each flag name and whether it is set or not. groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { return err } + if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { + return err + } if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { return err } @@ -142,6 +164,27 @@ func validateRequiredFlagGroups(data map[string]map[string]bool) error { return nil } +func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) >= 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) + } + return nil +} + func validateExclusiveFlagGroups(data map[string]map[string]bool) error { keys := sortedKeys(data) for _, flagList := range keys { @@ -176,6 +219,7 @@ func sortedKeys(m map[string]map[string]bool) []string { // enforceFlagGroupsForCompletion will do the following: // - when a flag in a group is present, other flags in the group will be marked required +// - when none of the flags in a one-required group are present, all flags in the group will be marked required // - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden // This allows the standard completion logic to behave appropriately for flag groups func (c *Command) enforceFlagGroupsForCompletion() { @@ -185,9 +229,11 @@ func (c *Command) enforceFlagGroupsForCompletion() { flags := c.Flags() groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) }) @@ -204,6 +250,26 @@ func (c *Command) enforceFlagGroupsForCompletion() { } } + // If none of the flags of a one-required group are present, we make all the flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range oneRequiredGroupStatus { + set := 0 + + for _, isSet := range flagnameAndStatus { + if isSet { + set++ + } + } + + // None of the flags of the group are set, mark all flags in the group + // as required + if set == 0 { + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + // If a flag that is mutually exclusive to others is present, we hide the other // flags of that group so the shell completion does not suggest them for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 177d2755..55195193 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -47,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars { `+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -[scriptblock]$__%[2]sCompleterBlock = { +[scriptblock]${__%[2]sCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -122,7 +122,7 @@ filter __%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[10]s=0 + ${env:%[10]s}=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -279,7 +279,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } } -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, 
ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5..00000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index 8a291eb2..00000000 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,64 +0,0 @@ -## Projects using Cobra - -- [Allero](https://github.com/allero-io/allero) -- [Arewefastyet](https://benchmark.vitess.io) -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](https://blevesearch.com/) -- [Cilium](https://cilium.io/) -- [CloudQuery](https://github.com/cloudquery/cloudquery) -- [CockroachDB](https://www.cockroachlabs.com/) -- [Constellation](https://github.com/edgelesssys/constellation) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Datree](https://github.com/datreeio/datree) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [GitHub CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](https://github.com/gopherjs/gopherjs) -- [GoReleaser](https://goreleaser.com) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Infracost](https://github.com/infracost/infracost) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/kubescape/kubescape) -- [KubeVirt](https://github.com/kubevirt/kubevirt) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Mercure](https://mercure.rocks/) -- [Meroxa CLI](https://github.com/meroxa/cli) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Moldy](https://github.com/Moldy-Community/moldy) -- [Multi-gitter](https://github.com/lindell/multi-gitter) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [nFPM](https://nfpm.goreleaser.com) -- [Okteto](https://github.com/okteto/okteto) -- [OpenShift](https://www.openshift.com/) -- [Ory Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pixie](https://github.com/pixie-io/pixie) -- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](https://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Pulumi](https://www.pulumi.com) -- [QRcp](https://github.com/claudiodangelis/qrcp) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) -- [Sia](https://github.com/SiaFoundation/siad) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch 
CLI](https://github.com/twitchdev/twitch-cli) -- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) -- [Vitess](https://vitess.io) -- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) -- [Werf](https://werf.io/) -- [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index 065c0621..00000000 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,576 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -Cobra will automatically provide your program with a fully functional `completion` command, -similarly to how it provides the `help` command. - -## Creating your own completion command - -If you do not wish to use the default `completion` command, you can choose to -provide your own, which will take precedence over the default one. (This also provides -backwards-compatibility with programs that already have their own `completion` command.) - -If you are using the `cobra-cli` generator, -which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), -you can create a completion command by running - -```bash -cobra-cli add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: fmt.Sprintf(`To load completions: - -Bash: - - $ source <(%[1]s completion bash) - - # To load completions for each session, execute once: - # Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s - # macOS: - $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ %[1]s completion fish | source - - # To load completions for each session, execute once: - $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -PowerShell: - - PS> %[1]s completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> %[1]s completion powershell > %[1]s.ps1 - # and source this file from your PowerShell profile. -`, cmd.Root().Name()), - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script, so they must be removed.
- -## Adapting the default completion command - -Cobra provides a few options for the default `completion` command. To configure such options you must set -the `CompletionOptions` field on the *root* command. - -To tell Cobra *not* to provide the default `completion` command: -``` -rootCmd.CompletionOptions.DisableDefaultCmd = true -``` - -To tell Cobra to mark the default `completion` command as *hidden*: -``` -rootCmd.CompletionOptions.HiddenDefaultCmd = true -``` - -To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: -``` -rootCmd.CompletionOptions.DisableNoDescFlag = true -``` - -To tell Cobra to completely disable descriptions for completions: -``` -rootCmd.CompletionOptions.DisableDescriptions = true -``` - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. -Some simplified code from `kubectl get` looks like: - -```go -validArgs = []string{ "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. 
-Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) error { - return RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are `harbor`, `notary`, `rook` and `thanos`; this dynamic completion will then give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that allow you to control certain shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`: -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search. For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs - -// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order -// in which the completions are provided -ShellCompDirectiveKeepOrder -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script.
To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned further above.
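The `--output` example above returns every candidate and lets the shell filter; a completion function may also pre-filter against `toComplete` itself. A minimal sketch, assuming the `strings` package is imported (the `format` flag and its candidate list are hypothetical):

```go
flagName := "format"
_ = cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Hypothetical candidates; a real program might compute these dynamically.
	candidates := []string{"json", "table", "yaml"}
	var comps []string
	for _, c := range candidates {
		// Keep only the candidates matching what the user has already typed.
		if strings.HasPrefix(c, toComplete) {
			comps = append(comps, c)
		}
	}
	return comps, cobra.ShellCompDirectiveNoFileComp
})
```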
- -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, cobra.ShellCompDirectiveFilterFileExt }) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -Cobra provides support for completion descriptions. Such descriptions are supported for each shell -(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). -For commands and flags, Cobra will provide the descriptions automatically, based on usage information. -For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`.
For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` - -If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: - -```bash -$ source <(helm completion bash) -$ helm completion [tab][tab] -bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) -fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) - -$ source <(helm completion bash --no-descriptions) -$ helm completion [tab][tab] -bash fish powershell zsh -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname [tab][tab] -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -### Bash completion V2 - -Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling -`GenBashCompletion()` or `GenBashCompletionFile()`. - -A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or -`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion -(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion -solution described in this document. -Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash -completion V2 solution which provides the following extra features: -- Supports completion descriptions (like the other shells) -- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) -- Streamlined user experience thanks to a completion behavior aligned with the other shells - -`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` -you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra -will provide the description automatically based on usage information. You can choose to make this option configurable by -your users.
- -``` -# With descriptions -$ helm s[tab][tab] -search (search for a keyword in charts) status (display the status of the named release) -show (show information of a chart) - -# Without descriptions -$ helm s[tab][tab] -search show status -``` -**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. -## Zsh completions - -Cobra supports native zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` and be named -`_<yourProgram>`. You will need to start a new shell for the completions to become available. - -Zsh supports descriptions for completions. Cobra will provide the description automatically, -based on usage information. Cobra provides a way to completely disable such descriptions by -using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make -this a configurable option to your users. -``` -# With descriptions -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. - * You should instead use `RegisterFlagCompletionFunc()`. - -### Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. -Please refer to [Zsh Completions](zsh_completions.md) for details. - -## fish completions - -Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. -``` -# With descriptions -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `fish`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `fish`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) - -## PowerShell completions - -Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. - -The script is designed to support all three PowerShell completion modes: - -* TabCompleteNext (default Windows style - on each key press the next option is displayed) -* Complete (works like bash) -* MenuComplete (works like zsh) - -You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. - -Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -``` -# With descriptions and Mode 'Complete' -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# With descriptions and Mode 'MenuComplete' The description of the currently selected value will be displayed below the suggestions. -$ helm s[tab] -search show status - -search for a keyword in charts - -# Without descriptions -$ helm s[tab] -search show status -``` -### Aliases - -You can also configure `powershell` aliases for your program and they will also support completions. - -``` -$ sal aliasname origcommand -$ Register-ArgumentCompleter -CommandName 'aliasname' -ScriptBlock $__origcommandCompleterBlock - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname [tab][tab] -completion firstcommand secondcommand -``` -The name of the completer block variable is of the form `$__<program>CompleterBlock` where every `-` and `:` in the program name have been replaced with `_`, to respect powershell naming syntax. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md deleted file mode 100644 index 85201d84..00000000 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ /dev/null @@ -1,726 +0,0 @@ -# User Guide - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra-CLI is its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at https://gohugo.io/documentation/`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra-cli", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command.
-func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := os.UserHomeDir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigType("yaml") - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Organizing subcommands - -A command may have subcommands which in turn may have other subcommands. This is achieved by using -`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in -its own go package. - -The suggested approach is for the parent command to use `AddCommand` to add its most immediate -subcommands. For example, consider the following directory structure: - -```text -├── cmd -│   ├── root.go -│   └── sub1 -│   ├── sub1.go -│   └── sub2 -│   ├── leafA.go -│   ├── leafB.go -│   └── sub2.go -└── main.go -``` - -In this case: - -* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. -* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. -* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the - sub2 command. - -This approach ensures the subcommands are always included at compile time while avoiding cyclic -references. - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used.
- -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -### Flag Groups - -If you have different flags that must be provided together (e.g. 
if they provide the `--username` flag they MUST provide the `--password` flag as well) then -Cobra can enforce that requirement: -```go -rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") -rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") -rootCmd.MarkFlagsRequiredTogether("username", "password") -``` - -You can also prevent different flags from being provided together if they represent mutually -exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: -```go -rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") -rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") -``` - -In both of these cases: - - both local and persistent flags can be used - - **NOTE:** the group is only enforced on commands where every flag is defined - - a flag may appear in multiple groups - - a group may contain any number of flags - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field of `Command`. -The following validators are built in: - -- Number of arguments: - - `NoArgs` - report an error if there are any positional args. - - `ArbitraryArgs` - accept any number of args. - - `MinimumNArgs(int)` - report an error if less than N positional args are provided. - - `MaximumNArgs(int)` - report an error if more than N positional args are provided. - - `ExactArgs(int)` - report an error if there are not exactly N positional args. - - `RangeArgs(min, max)` - report an error if the number of args is not between `min` and `max`. -- Content of the arguments: - - `OnlyValidArgs` - report an error if there are any positional args not specified in the `ValidArgs` field of `Command`, which can optionally be set to a list of valid values for positional args. - -If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. - -Moreover, `MatchAll(pargs ...PositionalArgs)` enables combining existing checks with arbitrary other checks. -For instance, if you want to report an error if there are not exactly N positional args OR if there are any positional -args that are not in the `ValidArgs` field of `Command`, you can call `MatchAll` on `ExactArgs` and `OnlyValidArgs`, as -shown below: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -It is possible to set any custom validator that satisfies `func(cmd *cobra.Command, args []string) error`. -For example: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - // Optionally run one of the validators provided by cobra - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return err - } - // Run the custom validation logic - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra-cli help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra-cli [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Grouping commands in help - -Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly -defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element -of that subcommand. The groups will appear in the help output in the same order as they are defined using different -calls to `AddGroup()`. 
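A minimal sketch of such grouping (the `createCmd` subcommand and the "management" group ID here are illustrative assumptions, not taken from this patch):

```go
// Declare a group on the parent command, then opt subcommands into it by ID.
rootCmd.AddGroup(&cobra.Group{ID: "management", Title: "Management Commands:"})
createCmd.GroupID = "management"
rootCmd.AddCommand(createCmd)
```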
If you use the generated `help` or `completion` commands, you can set their group ids using -`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with the following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra-cli --invalid - Error: unknown flag: --invalid - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatically generated based on existing subcommands and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands but for which -you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). - -## Providing Active Help - -Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff6178..00000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. 
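A rough sketch of that migration (the file extensions and word list below are illustrative assumptions, not part of this patch):

```go
// Instead of MarkZshCompPositionalArgumentFile(pos, glob) and
// MarkZshCompPositionalArgumentWords(pos, words), complete the first
// positional argument with extension-filtered files and the second
// with a fixed word list.
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	if len(args) == 0 {
		return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
	}
	return []string{"json", "table"}, cobra.ShellCompDirectiveNoFileComp
}
```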
- -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/vendor/github.com/vanilla-os/prometheus/.devcontainer.json b/vendor/github.com/vanilla-os/prometheus/.devcontainer.json new file mode 100644 index 00000000..2f07aa3d --- /dev/null +++ b/vendor/github.com/vanilla-os/prometheus/.devcontainer.json @@ -0,0 +1,3 @@ +{ + "image": "ghcr.io/vanilla-os/dev:main" +} \ No newline at end of file diff --git a/vendor/github.com/vanilla-os/prometheus/client.go b/vendor/github.com/vanilla-os/prometheus/client.go index a5acc49b..ddb0d339 100644 --- a/vendor/github.com/vanilla-os/prometheus/client.go +++ b/vendor/github.com/vanilla-os/prometheus/client.go @@ -17,6 +17,7 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/containers/buildah/define" 
"github.com/containers/buildah/imagebuildah" @@ -65,61 +66,106 @@ func NewPrometheus(root, graphDriverName string, maxParallelDownloads uint) (*Pr }, nil } -/* PullImage pulls an image from a remote registry and stores it in the - * Prometheus store. It returns the manifest of the pulled image and an - * error if any. Note that the 'docker://' prefix is automatically added - * to the imageName to make it compatible with the alltransports.ParseImageName - * method. */ -func (p *Prometheus) PullImage(imageName string, dstName string) (*OciManifest, error) { - srcRef, err := alltransports.ParseImageName(fmt.Sprintf("docker://%s", imageName)) +// PullImage pulls an image from a remote registry and stores it in the +// Prometheus store. It returns the manifest of the pulled image and an +// error if any. Note that the 'docker://' prefix is automatically added +// to the imageName to make it compatible with the alltransports.ParseImageName +// method. +func (p *Prometheus) PullImage(imageName, dstName string) (*OciManifest, error) { + progressCh := make(chan types.ProgressProperties) + manifestCh := make(chan OciManifest) + + defer close(progressCh) + defer close(manifestCh) + + err := p.pullImage(imageName, dstName, progressCh, manifestCh) if err != nil { return nil, err } + for { + select { + case report := <-progressCh: + fmt.Printf("%s: %v/%v\n", report.Artifact.Digest.Encoded()[:12], report.Offset, report.Artifact.Size) + case manifest := <-manifestCh: + return &manifest, nil + } + } +} + +// PullImageAsync does the same thing as PullImage, but returns right +// after starting the pull process. The user can track progress in the +// background by reading from the `progressCh` channel, which contains +// information about the current blob and its progress. When the pull +// process is done, the image's manifest will be sent via the `manifestCh` +// channel, which indicates the process is done. +// +// NOTE: The user is responsible for closing both channels once the operation +// completes. 
+func (p *Prometheus) PullImageAsync(imageName, dstName string, progressCh chan types.ProgressProperties, manifestCh chan OciManifest) error { + err := p.pullImage(imageName, dstName, progressCh, manifestCh) + return err +} + +func (p *Prometheus) pullImage(imageName, dstName string, progressCh chan types.ProgressProperties, manifestCh chan OciManifest) error { + srcRef, err := alltransports.ParseImageName(fmt.Sprintf("docker://%s", imageName)) + if err != nil { + return err + } destRef, err := storage.Transport.ParseStoreReference(p.Store, dstName) if err != nil { - return nil, err + return err } systemCtx := &types.SystemContext{} policy, err := signature.DefaultPolicy(systemCtx) if err != nil { - return nil, err + return err } policyCtx, err := signature.NewPolicyContext(policy) if err != nil { - return nil, err + return err } - pulledManifestBytes, err := copy.Image( - context.Background(), - policyCtx, - destRef, - srcRef, - ©.Options{ - ReportWriter: os.Stdout, - MaxParallelDownloads: p.Config.MaxParallelDownloads, - }, - ) + duration, err := time.ParseDuration("100ms") if err != nil { - return nil, err - } + return err + } + + go func() { + pulledManifestBytes, err := copy.Image( + context.Background(), + policyCtx, + destRef, + srcRef, + ©.Options{ + MaxParallelDownloads: p.Config.MaxParallelDownloads, + ProgressInterval: duration, + Progress: progressCh, + }, + ) + if err != nil { + return + } - var manifest OciManifest - err = json.Unmarshal(pulledManifestBytes, &manifest) - if err != nil { - return nil, err - } + var manifest OciManifest + err = json.Unmarshal(pulledManifestBytes, &manifest) + if err != nil { + return + } - // here we remove the 'sha256:' prefix from the digest, so we don't have - // to deal with it later - manifest.Config.Digest = manifest.Config.Digest[7:] - for i := range manifest.Layers { - manifest.Layers[i].Digest = manifest.Layers[i].Digest[7:] - } + // here we remove the 'sha256:' prefix from the digest, so we don't have + // to deal with it later + manifest.Config.Digest = manifest.Config.Digest[7:] + for i := range manifest.Layers { + manifest.Layers[i].Digest = manifest.Layers[i].Digest[7:] + } + + manifestCh <- manifest + }() - return &manifest, nil + return nil } /* GetImageByDigest returns an image from the Prometheus store by its digest. 
*/ @@ -185,8 +231,8 @@ func (p *Prometheus) BuildContainerFile(dockerfilePath string, imageName string) context.Background(), p.Store, define.BuildOptions{ - Output: imageName, - }, + Output: imageName, + }, dockerfilePath, ) if err != nil { diff --git a/vendor/github.com/vanilla-os/prometheus/main.go b/vendor/github.com/vanilla-os/prometheus/main.go index 66ad669b..7b1b4c03 100644 --- a/vendor/github.com/vanilla-os/prometheus/main.go +++ b/vendor/github.com/vanilla-os/prometheus/main.go @@ -1,5 +1 @@ package prometheus - -var version = "0.1.6" - -func main() {} diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index 09825ca0..af97c92a 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -42,11 +42,9 @@ func main() { mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right - decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), + decor.Name(name, decor.WC{C: decor.DindentRight | decor.DextraSpace}), // replace ETA decorator with "done" message, OnComplete event - decor.OnComplete( - decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4}), "done", - ), + decor.OnComplete(decor.AverageETA(decor.ET_STYLE_GO), "done"), ), mpb.AppendDecorators(decor.Percentage()), ) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index 5f814121..390bc36a 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -429,13 +429,11 @@ func (b *Bar) render(tw int) { return } } - frame := &renderFrame{ - rows: rows, - shutdown: s.shutdown, - rmOnComplete: s.rmOnComplete, - noPop: s.noPop, - } + frame := &renderFrame{rows: rows} if s.completed || s.aborted { + frame.shutdown = s.shutdown + frame.rmOnComplete = s.rmOnComplete + frame.noPop = s.noPop // post increment makes sure OnComplete decorators are rendered s.shutdown++ } @@ -460,12 +458,15 @@ func (b *Bar) triggerCompletion(s *bState) { } func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { - var anyOtherRunning bool + var otherRunning int b.container.traverseBars(func(bar *Bar) bool { - anyOtherRunning = b != bar && bar.IsRunning() - return anyOtherRunning + if b != bar && bar.IsRunning() { + otherRunning++ + return false // stop traverse + } + return true // continue traverse }) - if !anyOtherRunning { + if otherRunning == 0 { for { select { case renderReq <- time.Now(): diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go index f537d3f7..31062ebd 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -8,29 +8,27 @@ import ( ) const ( - // DidentRight bit specifies identation direction. + // DindentRight sets indentation from right to left. // - // |foo |b | With DidentRight - // | foo| b| Without DidentRight - DidentRight = 1 << iota + // |foo |b | DindentRight is set + // | foo| b| DindentRight is not set + DindentRight = 1 << iota - // DextraSpace bit adds extra space, makes sense with DSyncWidth only. - // When DidentRight bit set, the space will be added to the right, - // otherwise to the left. + // DextraSpace bit adds extra indentation space. DextraSpace // DSyncWidth bit enables same column width synchronization. // Effective with multiple bars only.
DSyncWidth - // DSyncWidthR is shortcut for DSyncWidth|DidentRight - DSyncWidthR = DSyncWidth | DidentRight + // DSyncWidthR is shortcut for DSyncWidth|DindentRight + DSyncWidthR = DSyncWidth | DindentRight // DSyncSpace is shortcut for DSyncWidth|DextraSpace DSyncSpace = DSyncWidth | DextraSpace - // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight - DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight + // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DindentRight + DSyncSpaceR = DSyncWidth | DextraSpace | DindentRight ) // TimeStyle enum. @@ -143,11 +141,10 @@ func (wc WC) Format(str string) (string, int) { viewWidth := runewidth.StringWidth(str) if wc.W > viewWidth { viewWidth = wc.W + } else if (wc.C & DextraSpace) != 0 { + viewWidth++ } if (wc.C & DSyncWidth) != 0 { - if (wc.C & DextraSpace) != 0 { - viewWidth++ - } wc.wsync <- viewWidth viewWidth = <-wc.wsync } @@ -156,7 +153,7 @@ func (wc WC) Format(str string) (string, int) { // Init initializes width related config. func (wc *WC) Init() WC { - if (wc.C & DidentRight) != 0 { + if (wc.C & DindentRight) != 0 { wc.fill = runewidth.FillRight } else { wc.fill = runewidth.FillLeft diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 8c609b46..0bd17bfb 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -68,14 +68,17 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { ctx = context.Background() } ctx, cancel := context.WithCancel(ctx) + delayRC := make(chan struct{}, 1) + delayRC <- struct{}{} s := &pState{ ctx: ctx, hm: make(heapManager), dropS: make(chan struct{}), dropD: make(chan struct{}), renderReq: make(chan time.Time), - refreshRate: defaultRefreshRate, popPriority: math.MinInt32, + refreshRate: defaultRefreshRate, + delayRC: delayRC, queueBars: make(map[*Bar]*Bar), output: os.Stdout, debugOut: io.Discard, @@ -191,7 +194,7 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) { select { case p.operateState <- func(s *pState) { s.hm.iter(iter, drop) }: for b := range iter { - if cb(b) { + if !cb(b) { close(drop) break } @@ -258,34 +261,52 @@ func (p *Progress) Shutdown() { func (p *Progress) serve(s *pState, cw *cwriter.Writer) { defer p.pwg.Done() - render := func() error { return s.render(cw) } var err error + w := cwriter.New(io.Discard) + renderReq := s.renderReq + operateState := p.operateState + interceptIO := p.interceptIO for { select { - case op := <-p.operateState: + case <-s.delayRC: + w, cw = cw, nil + s.delayRC = nil + case op := <-operateState: op(s) - case fn := <-p.interceptIO: - fn(cw) - case <-s.renderReq: - e := render() - if e != nil { + case fn := <-interceptIO: + fn(w) + case <-renderReq: + err = s.render(w) + if err != nil { + // (*pState).(autoRefreshListener|manualRefreshListener) may block + // if not launching following short lived goroutine + go func() { + for { + select { + case <-s.renderReq: + case <-p.done: + return + } + } + }() p.cancel() // cancel all bars - render = func() error { return nil } - err = e + renderReq = nil + operateState = nil + interceptIO = nil } case <-p.done: - update := make(chan bool) - for s.autoRefresh && err == nil { - s.hm.state(update) - if <-update { - err = render() - } else { - break - } - } if err != nil { _, _ = fmt.Fprintln(s.debugOut, err.Error()) + } else if s.autoRefresh { + update := make(chan bool) + for i := 0; i == 0 || <-update; i++ { + if err := s.render(w); err != nil { + 
_, _ = fmt.Fprintln(s.debugOut, err.Error()) + break + } + s.hm.state(update) + } } s.hm.end(s.shutdownNotifier) return @@ -293,10 +314,7 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { } } -func (s pState) autoRefreshListener(done chan struct{}) { - if s.delayRC != nil { - <-s.delayRC - } +func (s *pState) autoRefreshListener(done chan struct{}) { ticker := time.NewTicker(s.refreshRate) defer ticker.Stop() for { @@ -310,7 +328,7 @@ func (s pState) autoRefreshListener(done chan struct{}) { } } -func (s pState) manualRefreshListener(done chan struct{}) { +func (s *pState) manualRefreshListener(done chan struct{}) { for { select { case x := <-s.manualRC: @@ -342,9 +360,9 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { if s.reqWidth > 0 { width = s.reqWidth } else { - width = 100 + width = 80 } - height = 100 + height = width } for b := range iter { @@ -420,7 +438,7 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error { return cw.Flush(len(rows) - popCount) } -func (s pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { +func (s *pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { s.hm.push(b, sync) wg.Done() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 00000000..844b5029 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. 
+func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). +type kindEncoderCacheEntry struct { + enc ValueEncoder +} + +type kindEncoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry +} + +func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { + if enc != nil && rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) + } +} + +func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { + return ent.enc, ent.enc != nil + } + } + return nil, false +} + +func (c *kindEncoderCache) Clone() *kindEncoderCache { + cc := new(kindEncoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueDecoder interface). 
+type kindDecoderCacheEntry struct { + dec ValueDecoder +} + +type kindDecoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry +} + +func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { + if rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) + } +} + +func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { + return ent.dec, ent.dec != nil + } + } + return nil, false +} + +func (c *kindDecoderCache) Clone() *kindDecoderCache { + cc := new(kindDecoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index e479c358..2ce11973 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -24,7 +24,7 @@ import ( var ( defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled") + errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") ) type decodeBinaryError struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index a1bf9c3e..e5923230 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -8,7 +8,6 @@ package bsoncodec import ( "reflect" - "sync" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -22,9 +21,8 @@ var _ ValueDecoder = &PointerCodec{} // Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the // PointerCodec registered. type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex + ecache typeEncoderCache + dcache typeDecoderCache } // NewPointerCodec returns a PointerCodec that has been initialized. @@ -32,10 +30,7 @@ type PointerCodec struct { // Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the // PointerCodec registered. 
func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } + return &PointerCodec{} } // EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil @@ -52,24 +47,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val return vw.WriteNull() } - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} + typ := val.Type() + if v, ok := pc.ecache.Load(typ); ok { + if v == nil { + return ErrNoEncoder{Type: typ} } - return enc.EncodeValue(ec, vw, val.Elem()) + return v.EncodeValue(ec, vw, val.Elem()) } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + enc, err := ec.LookupEncoder(typ.Elem()) + enc = pc.ecache.LoadOrStore(typ, enc) if err != nil { return err } - return enc.EncodeValue(ec, vw, val.Elem()) } @@ -80,36 +70,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} } + typ := val.Type() if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadNull() } if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadUndefined() } if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) + val.Set(reflect.New(typ.Elem())) } - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} + if v, ok := pc.dcache.Load(typ); ok { + if v == nil { + return ErrNoDecoder{Type: typ} } - return dec.DecodeValue(dc, vr, val.Elem()) + return v.DecodeValue(dc, vr, val.Elem()) } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + dec, err := dc.LookupDecoder(typ.Elem()) + dec = pc.dcache.LoadOrStore(typ, dec) if err != nil { return err } - return dec.DecodeValue(dc, vr, val.Elem()) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 930de284..196c491b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -216,72 +216,42 @@ func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Typ // // Deprecated: Use NewRegistry instead. 
func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder, len(rb.registry.typeEncoders)) - for t, enc := range rb.registry.typeEncoders { - registry.typeEncoders[t] = enc - } - - registry.typeDecoders = make(map[reflect.Type]ValueDecoder, len(rb.registry.typeDecoders)) - for t, dec := range rb.registry.typeDecoders { - registry.typeDecoders[t] = dec - } - - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.registry.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.registry.interfaceEncoders) - - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.registry.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.registry.interfaceDecoders) - - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.registry.kindEncoders { - registry.kindEncoders[kind] = enc - } - - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.registry.kindDecoders { - registry.kindDecoders[kind] = dec + r := &Registry{ + interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), + interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), + typeEncoders: rb.registry.typeEncoders.Clone(), + typeDecoders: rb.registry.typeDecoders.Clone(), + kindEncoders: rb.registry.kindEncoders.Clone(), + kindDecoders: rb.registry.kindDecoders.Clone(), } - - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.registry.typeMap { - registry.typeMap[bt] = rt - } - - return registry + rb.registry.typeMap.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + r.typeMap.Store(k, v) + } + return true + }) + return r } // A Registry is used to store and retrieve codecs for types and interfaces. This type is the main // typed passed around and Encoders and Decoders are constructed from it. type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - interfaceEncoders []interfaceValueEncoder interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex + typeEncoders *typeEncoderCache + typeDecoders *typeDecoderCache + kindEncoders *kindEncoderCache + kindDecoders *kindDecoderCache + typeMap sync.Map // map[bsontype.Type]reflect.Type } // NewRegistry creates a new empty Registry. func NewRegistry() *Registry { return &Registry{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), + typeEncoders: new(typeEncoderCache), + typeDecoders: new(typeDecoderCache), + kindEncoders: new(kindEncoderCache), + kindDecoders: new(kindDecoderCache), } } @@ -296,7 +266,7 @@ func NewRegistry() *Registry { // // RegisterTypeEncoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { - r.typeEncoders[valueType] = enc + r.typeEncoders.Store(valueType, enc) } // RegisterTypeDecoder registers the provided ValueDecoder for the provided type. 
@@ -310,7 +280,7 @@ func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) // // RegisterTypeDecoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { - r.typeDecoders[valueType] = dec + r.typeDecoders.Store(valueType, dec) } // RegisterKindEncoder registers the provided ValueEncoder for the provided kind. @@ -326,7 +296,7 @@ func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) // // RegisterKindEncoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { - r.kindEncoders[kind] = enc + r.kindEncoders.Store(kind, enc) } // RegisterKindDecoder registers the provided ValueDecoder for the provided kind. @@ -342,7 +312,7 @@ func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { // // RegisterKindDecoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { - r.kindDecoders[kind] = dec + r.kindDecoders.Store(kind, dec) } // RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will @@ -401,7 +371,7 @@ func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder // // reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { - r.typeMap[bt] = rt + r.typeMap.Store(bt, rt) } // LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup @@ -418,9 +388,10 @@ func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { // If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for // concurrent use by multiple goroutines after all codecs and encoders are registered. 
func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { - r.mu.RLock() + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } enc, found := r.lookupTypeEncoder(valueType) - r.mu.RUnlock() if found { if enc == nil { return nil, ErrNoEncoder{Type: valueType} @@ -430,36 +401,21 @@ func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { enc, found = r.lookupInterfaceEncoder(valueType, true) if found { - r.mu.Lock() - r.typeEncoders[valueType] = enc - r.mu.Unlock() - return enc, nil + return r.typeEncoders.LoadOrStore(valueType, enc), nil } - if valueType == nil { - r.mu.Lock() - r.typeEncoders[valueType] = nil - r.mu.Unlock() - return nil, ErrNoEncoder{Type: valueType} - } - - enc, found = r.kindEncoders[valueType.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[valueType] = nil - r.mu.Unlock() - return nil, ErrNoEncoder{Type: valueType} + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil } + return nil, ErrNoEncoder{Type: valueType} +} - r.mu.Lock() - r.typeEncoders[valueType] = enc - r.mu.Unlock() - return enc, nil +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) } -func (r *Registry) lookupTypeEncoder(valueType reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[valueType] - return enc, found +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) } func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { @@ -475,7 +431,7 @@ func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool // ahead in interfaceEncoders defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) if !found { - defaultEnc = r.kindEncoders[valueType.Kind()] + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -500,10 +456,7 @@ func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { if valueType == nil { return nil, ErrNilType } - decodererr := ErrNoDecoder{Type: valueType} - r.mu.RLock() dec, found := r.lookupTypeDecoder(valueType) - r.mu.RUnlock() if found { if dec == nil { return nil, ErrNoDecoder{Type: valueType} @@ -513,29 +466,21 @@ func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { dec, found = r.lookupInterfaceDecoder(valueType, true) if found { - r.mu.Lock() - r.typeDecoders[valueType] = dec - r.mu.Unlock() - return dec, nil + return r.storeTypeDecoder(valueType, dec), nil } - dec, found = r.kindDecoders[valueType.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[valueType] = nil - r.mu.Unlock() - return nil, decodererr + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil } - - r.mu.Lock() - r.typeDecoders[valueType] = dec - r.mu.Unlock() - return dec, nil + return nil, ErrNoDecoder{Type: valueType} } func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[valueType] - return dec, found + return r.typeDecoders.Load(valueType) +} + +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) } func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { @@ -548,7 +493,7 @@ func (r *Registry) lookupInterfaceDecoder(valueType 
reflect.Type, allowAddr bool // ahead in interfaceDecoders defaultDec, found := r.lookupInterfaceDecoder(valueType, false) if !found { - defaultDec = r.kindDecoders[valueType.Kind()] + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) } return newCondAddrDecoder(idec.vd, defaultDec), true } @@ -561,11 +506,11 @@ func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool // // LookupTypeMapEntry should not be called concurrently with any other Registry method. func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { return nil, ErrNoTypeMapEntry{Type: bt} } - return t, nil + return v.(reflect.Type), nil } type interfaceValueEncoder struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index 20c3e754..a43daf00 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -62,7 +62,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re } // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().ConvertibleTo(tD) { + if val.Type() == tD || val.Type().ConvertibleTo(tD) { d := val.Convert(tD).Interface().(primitive.D) dw, err := vw.WriteDocument() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 1dfdd988..4cde0a4d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -63,8 +63,7 @@ type Zeroer interface { // Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the // StructCodec registered. type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex + cache sync.Map // map[reflect.Type]*structDescription parser StructTagParser // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the @@ -115,7 +114,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) structOpt := bsonoptions.MergeStructCodecOptions(opts...) codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), parser: p, } @@ -192,15 +190,14 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val encoder := desc.encoder var zero bool - rvInterface := rv.Interface() if cz, ok := encoder.(CodecZeroer); ok { - zero = cz.IsTypeZero(rvInterface) + zero = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { // isZero will not treat an interface rv as an interface, so we need to check for the // zero interface separately. 
zero = rv.IsNil() } else { - zero = isZero(rvInterface, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } if desc.omitEmpty && zero { continue @@ -394,56 +391,32 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return nil } -func isZero(i interface{}, omitZeroStruct bool) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true +func isZero(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() - } - - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Struct: + if kind == reflect.Struct { if !omitZeroStruct { return false } - - // TODO(GODRIVER-2820): Update the logic to be able to handle private struct fields. - // TODO Use condition "reflect.Zero(v.Type()).Equal(v)" instead. - vt := v.Type() if vt == tTime { return v.Interface().(time.Time).IsZero() } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { continue // Private field } - fld := v.Field(i) - if !isZero(fld.Interface(), omitZeroStruct) { + if !isZero(v.Field(i), omitZeroStruct) { return false } } return true } - - return false + return !v.IsValid() || v.IsZero() } type structDescription struct { @@ -502,13 +475,27 @@ func (sc *StructCodec) describeStruct( ) (*structDescription, error) { // We need to analyze the struct, including getting the tags, collecting // information about inlining, and create a map of the field name to the field. - sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil + } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. 
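+	// Until then, two goroutines may both reach describeStructSlow for the
+	// same type; the LoadOrStore below keeps whichever *structDescription
+	// lands in the cache first and hands it back to every caller, so the
+	// duplicate work is wasted but the cached description stays consistent.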
+ ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err } + if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { + ds = v.(*structDescription) + } + return ds, nil +} +func (sc *StructCodec) describeStructSlow( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { numFields := t.NumField() sd := &structDescription{ fm: make(map[string]fieldDescription, numFields), @@ -639,10 +626,6 @@ func (sc *StructCodec) describeStruct( sort.Sort(byIndex(sd.fl)) - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - return sd, nil } @@ -700,21 +683,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { // DeepZero returns recursive zero object func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } + if st.Kind() == reflect.Struct { + numField := st.NumField() + for i := 0; i < numField; i++ { + if result == emptyValue { + result = reflect.Indirect(reflect.New(st)) + } + f := result.Field(i) + if f.CanInterface() { + if f.Type().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) } } } } - - return + return result } // recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index 07f4b70e..6ade17b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() var tBinary = reflect.TypeOf(primitive.Binary{}) var tUndefined = reflect.TypeOf(primitive.Undefined{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 33d59bd2..4d279b7f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -124,7 +124,7 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error } func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. length, rem, ok := bsoncore.ReadLength(src) if !ok { return fmt.Errorf("couldn't read length from src, not enough bytes. 
length=%d", len(src)) @@ -193,7 +193,7 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -213,7 +213,7 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -258,7 +258,7 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, [] } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) start := len(dst) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 9bf24fae..a242bb57 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -739,8 +739,7 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return nil, ErrEOA } - _, err = vr.readCString() - if err != nil { + if err := vr.skipCString(); err != nil { return nil, err } @@ -794,6 +793,15 @@ func (vr *valueReader) readByte() (byte, error) { return vr.d[vr.offset-1], nil } +func (vr *valueReader) skipCString() error { + idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) + if idx < 0 { + return io.EOF + } + vr.offset += int64(idx) + 1 + return nil +} + func (vr *valueReader) readCString() (string, error) { idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) if idx < 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index a6dd8d34..311518a8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -28,6 +28,13 @@ var vwPool = sync.Pool{ }, } +func putValueWriter(vw *valueWriter) { + if vw != nil { + vw.w = nil // don't leak the writer + vwPool.Put(vw) + } +} + // BSONValueWriterPool is a pool for BSON ValueWriters. // // Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. 
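The putValueWriter helper introduced above matters because a sync.Pool entry keeps everything it references reachable: pooling a valueWriter that still points at the caller's io.Writer would pin that writer until the entry is reused or collected. A minimal sketch of the nil-before-Put pattern, using hypothetical names (wrapper, putWrapper) rather than the driver's types:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "sync"
    )

    // wrapper pairs a destination writer with a reusable scratch buffer,
    // loosely mirroring the shape of the driver's valueWriter.
    type wrapper struct {
        w   io.Writer
        buf []byte
    }

    var wrapperPool = sync.Pool{New: func() interface{} { return new(wrapper) }}

    // putWrapper clears the externally owned io.Writer before pooling so the
    // pool never pins the caller's writer between uses.
    func putWrapper(wr *wrapper) {
        if wr != nil {
            wr.w = nil // don't leak the destination writer
            wr.buf = wr.buf[:0]
            wrapperPool.Put(wr)
        }
    }

    func main() {
        dst := new(bytes.Buffer)
        wr := wrapperPool.Get().(*wrapper)
        wr.w = dst
        wr.buf = append(wr.buf[:0], "hello"...)
        wr.w.Write(wr.buf)
        putWrapper(wr)
        fmt.Println(dst.String()) // prints: hello
    }

The buffer pool added to marshal.go later in this patch applies the same discipline from the other direction, refusing to recycle buffers over 16MiB so pooled entries stay roughly uniform in cost.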
@@ -149,32 +156,21 @@ type valueWriter struct { } func (vw *valueWriter) advanceFrame() { - if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack - length := len(vw.stack) - if length+1 >= cap(vw.stack) { - // double it - buf := make([]vwState, 2*cap(vw.stack)+1) - copy(buf, vw.stack) - vw.stack = buf - } - vw.stack = vw.stack[:length+1] - } vw.frame++ + if vw.frame >= int64(len(vw.stack)) { + vw.stack = append(vw.stack, vwState{}) + } } func (vw *valueWriter) push(m mode) { vw.advanceFrame() // Clean the stack - vw.stack[vw.frame].mode = m - vw.stack[vw.frame].key = "" - vw.stack[vw.frame].arrkey = 0 - vw.stack[vw.frame].start = 0 + vw.stack[vw.frame] = vwState{mode: m} - vw.stack[vw.frame].mode = m switch m { case mDocument, mArray, mCodeWithScope: - vw.reserveLength() + vw.reserveLength() // WARN: this is not needed } } @@ -213,6 +209,7 @@ func newValueWriter(w io.Writer) *valueWriter { return vw } +// TODO: only used in tests func newValueWriterFromSlice(buf []byte) *valueWriter { vw := new(valueWriter) stack := make([]vwState, 1, 5) @@ -249,17 +246,16 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod } func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error { - switch vw.stack[vw.frame].mode { + frame := &vw.stack[vw.frame] + switch frame.mode { case mElement: - key := vw.stack[vw.frame].key + key := frame.key if !isValidCString(key) { return errors.New("BSON element key cannot contain null bytes") } - - vw.buf = bsoncore.AppendHeader(vw.buf, t, key) + vw.appendHeader(t, key) case mValue: - // TODO: Do this with a cache of the first 1000 or so array keys. - vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey)) + vw.appendIntHeader(t, frame.arrkey) default: modes := []mode{mElement, mValue} if addmodes != nil { @@ -601,9 +597,11 @@ func (vw *valueWriter) writeLength() error { if length > maxSize { return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))} } - length = length - int(vw.stack[vw.frame].start) - start := vw.stack[vw.frame].start + frame := &vw.stack[vw.frame] + length = length - int(frame.start) + start := frame.start + _ = vw.buf[start+3] // BCE vw.buf[start+0] = byte(length) vw.buf[start+1] = byte(length >> 8) vw.buf[start+2] = byte(length >> 16) @@ -612,5 +610,31 @@ func (vw *valueWriter) writeLength() error { } func isValidCString(cs string) bool { - return !strings.ContainsRune(cs, '\x00') + // Disallow the zero byte in a cstring because the zero byte is used as the + // terminating character. + // + // It's safe to check bytes instead of runes because all multibyte UTF-8 + // code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e. + // 0) will never be part of a multibyte UTF-8 code point. This logic is the + // same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be + // inlined. + // + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127 + return strings.IndexByte(cs, 0) == -1 +} + +// appendHeader is the same as bsoncore.AppendHeader but does not check if the +// key is a valid C string since the caller has already checked for that. +// +// The caller of this function must check if key is a valid C string. +func (vw *valueWriter) appendHeader(t bsontype.Type, key string) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = append(vw.buf, key...) 
+ vw.buf = append(vw.buf, 0x00) +} + +func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = strconv.AppendInt(vw.buf, int64(key), 10) + vw.buf = append(vw.buf, 0x00) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 8cff5492..255d9909 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -47,6 +47,7 @@ const ( BinaryMD5 byte = 0x05 BinaryEncrypted byte = 0x06 BinaryColumn byte = 0x07 + BinarySensitive byte = 0x08 BinaryUserDefined byte = 0x80 ) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index f2c48d04..17ce6697 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -9,6 +9,7 @@ package bson import ( "bytes" "encoding/json" + "sync" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" @@ -141,6 +142,13 @@ func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{ return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } +// Pool of buffers for marshalling BSON. +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + // MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the // bytes to dst. If dst is not large enough to hold the bytes, it will be grown. If val is not a type that can be // transformed into a document, MarshalValueAppendWithContext should be used instead. @@ -162,8 +170,26 @@ func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{ // // See [Encoder] for more examples. func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) { - sw := new(bsonrw.SliceWriter) - *sw = dst + sw := bufPool.Get().(*bytes.Buffer) + defer func() { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum + // buffer to place back in the pool. We limit the size to 16MiB because + // that's the maximum wire message size supported by any current MongoDB + // server. + // + // Comment based on + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147 + // + // Recycle byte slices that are smaller than 16MiB and at least half + // occupied. + if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() { + bufPool.Put(sw) + } + }() + + sw.Reset() vw := bvwPool.Get(sw) defer bvwPool.Put(vw) @@ -184,7 +210,7 @@ func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interf return nil, err } - return *sw, nil + return append(dst, sw.Bytes()...), nil } // MarshalValue returns the BSON encoding of val. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index fe990a17..130da61b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -60,12 +60,19 @@ func (r Raw) LookupErr(key ...string) (RawValue, error) { // elements. If the document is not valid, the elements up to the invalid point will be returned // along with an error. 
func (r Raw) Elements() ([]RawElement, error) { - elems, err := bsoncore.Document(r).Elements() + doc := bsoncore.Document(r) + if len(doc) == 0 { + return nil, nil + } + elems, err := doc.Elements() + if err != nil { + return nil, err + } relems := make([]RawElement, 0, len(elems)) for _, elem := range elems { relems = append(relems, RawElement(elem)) } - return relems, err + return relems, nil } // Values returns this document as a slice of values. The returned slice will contain valid values. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index e201ac37..ef398124 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -45,5 +45,6 @@ const ( TypeBinaryMD5 = bsontype.BinaryMD5 TypeBinaryEncrypted = bsontype.BinaryEncrypted TypeBinaryColumn = bsontype.BinaryColumn + TypeBinarySensitive = bsontype.BinarySensitive TypeBinaryUserDefined = bsontype.BinaryUserDefined ) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index e52674aa..88133293 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -235,7 +235,7 @@ func BuildDocumentValue(elems ...[]byte) Value { return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)} } -// BuildDocumentElement will append a BSON embedded document elemnt using key and the provided +// BuildDocumentElement will append a BSON embedded document element using key and the provided // elements and return the extended buffer. func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte { return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...) diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 00000000..29f0a2de --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. 
The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. 
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 00000000..10f46948 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 00000000..063e7784 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 00000000..6713acca --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,243 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + 
PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, DI + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, DI + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), 
DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, DI + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, DI + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 00000000..a481b224 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + 
v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 00000000..16d58c65 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000..d2e98d42 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. 
+ Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) 
+} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000..199c21d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000..9ae8206c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,744 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define 
VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; 
\ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + 
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + 
VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + 
VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 
15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000..adfac00c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,278 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
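A small sketch (not part of the vendored sources) of the 128-bit message-counter update that the ADDQ/CMPQ/JGE/INCQ sequences in the assembly loops above implement: add 128 to the low word and carry into the high word when it wraps. The values below are illustrative.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const blockSize = 128
	c0, c1 := uint64(1<<64-100), uint64(7) // low word about to wrap

	// The assembly's comparison idiom: after the add, the low word is
	// smaller than the addend exactly when it wrapped around.
	lo := c0 + blockSize
	hi := c1
	if lo < blockSize {
		hi++
	}

	// Equivalent formulation with math/bits.
	lo2, carry := bits.Add64(c0, blockSize, 0)
	hi2 := c1 + carry

	fmt.Println(lo == lo2, hi == hi2) // true true
}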
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + HALF_ROUND(X0, X1, 
X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, 
X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000..3168a8aa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
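An illustrative sketch (not part of the vendored sources; the sigma row is taken from RFC 7693) of how the precomputed table below is derived: each row is a sigma permutation row with its even-indexed and odd-indexed entries grouped per half of the column and diagonal steps, and rounds 10 and 11 reuse rows 0 and 1 because BLAKE2b runs 12 rounds over a 10-row table.

package main

import "fmt"

func main() {
	// Sigma row 1 from RFC 7693.
	sigma1 := [16]byte{14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}
	var row [16]byte
	for j := 0; j < 4; j++ {
		row[j] = sigma1[2*j]      // first half of the column step (v0..v3)
		row[4+j] = sigma1[2*j+1]  // second half of the column step
		row[8+j] = sigma1[8+2*j]  // first half of the diagonal step
		row[12+j] = sigma1[9+2*j] // second half of the diagonal step
	}
	fmt.Println(row) // [14 4 9 13 10 8 15 6 1 0 11 5 12 2 7 3], the table's second row
}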
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 00000000..6e28668c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 00000000..52c414db --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produces a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must be between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. 
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000..54e446e1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
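A usage sketch (not part of the vendored sources) for the XOF API defined in blake2x.go above: request a fixed output length, or pass OutputLengthUnknown to stream up to the 256 GiB limit.

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/blake2b"
)

func main() {
	x, err := blake2b.NewXOF(64, nil) // unkeyed, 64 bytes of output
	if err != nil {
		panic(err)
	}
	x.Write([]byte("some input"))

	out := make([]byte, 64)
	if _, err := io.ReadFull(x, out); err != nil { // Read returns io.EOF past the limit
		panic(err)
	}
	fmt.Printf("%x\n", out)
}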
+ +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index d33c8890..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index 495c1fa6..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index e041da5e..ec2202bd 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -7,7 +7,10 @@ package poly1305 -import "encoding/binary" +import ( + "encoding/binary" + "math/bits" +) // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag // for a 64 bytes message is approximately @@ -114,13 +117,13 @@ type uint128 struct { } func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) + hi, lo := bits.Mul64(a, b) return uint128{lo, hi} } func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) if c != 0 { panic("poly1305: unexpected overflow") } @@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) { // hide leading zeroes. 
For full chunks, that's 1 << 128, so we can just // add 1 to the most significant (2¹²⁸) limb, h2. if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) h2 += c + 1 msg = msg[TagSize:] @@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) { copy(buf[:], msg) buf[len(msg)] = 1 - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) h2 += c msg = nil @@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) { m3 := h2r1 t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) // Now we have the result as 4 64-bit limbs, and we need to reduce it // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do @@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) { // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c cc = shiftRightBy2(cc) - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most @@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the // result if the subtraction underflows, and t otherwise. - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) // h = h if h < p else h - p h0 = select64(b, h0, hMinusP0) @@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // // by just doing a wide addition with the 128 low bits of h and discarding // the overflow. 
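A minimal sketch (not part of the vendored sources) of what this hunk changes: the hand-rolled bitsAdd64/bitsSub64/bitsMul64 shims are replaced by math/bits, whose Add64/Sub64/Mul64 compile to constant-time intrinsics since Go 1.13. The operands below are illustrative.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// 128-bit addition with explicit carry chaining, the pattern used in
	// updateGeneric above: (aHi,aLo) + (bHi,bLo).
	aLo, aHi := uint64(1<<64-1), uint64(1)
	bLo, bHi := uint64(1), uint64(2)
	lo, c := bits.Add64(aLo, bLo, 0)
	hi, _ := bits.Add64(aHi, bHi, c)
	fmt.Println(hi, lo) // 4 0

	// 64x64 -> 128-bit multiply, as wrapped by mul64.
	mHi, mLo := bits.Mul64(1<<63, 4)
	fmt.Println(mHi, mLo) // 2 0
}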
- h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) binary.LittleEndian.PutUint64(out[0:8], h0) binary.LittleEndian.PutUint64(out[8:16], h1) diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5dee..b3c1699b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index 4269ed11..bf225953 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { // This is the exposed reflection of the internal OCSP structures. -// The status values that can be expressed in OCSP. See RFC 6960. +// The status values that can be expressed in OCSP. See RFC 6960. +// These are used for the Response.Status field. const ( // Good means that the certificate is valid. - Good = iota + Good = 0 // Revoked means that the certificate has been deliberately revoked. - Revoked + Revoked = 1 // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown + Unknown = 2 // ServerFailed is unused and was never used (see // https://go-review.googlesource.com/#/c/18944). ParseResponse will // return a ResponseError when an error response is parsed. - ServerFailed + ServerFailed = 3 ) -// The enumerated reasons for revoking a certificate. See RFC 5280. +// The enumerated reasons for revoking a certificate. See RFC 5280. const ( Unspecified = 0 KeyCompromise = 1 diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go new file mode 100644 index 00000000..cf3eeb15 --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go @@ -0,0 +1,124 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ripemd160 implements the RIPEMD-160 hash algorithm. +// +// Deprecated: RIPEMD-160 is a legacy hash and should not be used for new +// applications. Also, this package does not and will not provide an optimized +// implementation. Instead, use a modern hash like SHA-256 (from crypto/sha256). 
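A usage sketch (not part of the vendored sources) for the deprecated package whose source follows; the expected digest in the comment is the well-known RIPEMD-160 test vector for "abc".

package main

import (
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	h := ripemd160.New()
	h.Write([]byte("abc"))
	// Expected: 8eb208f7e05d987a9b044a8e98c6b087f15a0bfc
	fmt.Printf("%x\n", h.Sum(nil))
}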
+package ripemd160 // import "golang.org/x/crypto/ripemd160" + +// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart +// Preneel with specifications available at: +// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf. + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.RIPEMD160, New) +} + +// The size of the checksum in bytes. +const Size = 20 + +// The block size of the hash algorithm in bytes. +const BlockSize = 64 + +const ( + _s0 = 0x67452301 + _s1 = 0xefcdab89 + _s2 = 0x98badcfe + _s3 = 0x10325476 + _s4 = 0xc3d2e1f0 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [5]uint32 // running context + x [BlockSize]byte // temporary buffer + nx int // index into x + tc uint64 // total count of bytes processed +} + +func (d *digest) Reset() { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 + d.nx = 0 + d.tc = 0 +} + +// New returns a new hash.Hash computing the checksum. +func New() hash.Hash { + result := new(digest) + result.Reset() + return result +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.tc += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == BlockSize { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + tc := d.tc + var tmp [64]byte + tmp[0] = 0x80 + if tc%64 < 56 { + d.Write(tmp[0 : 56-tc%64]) + } else { + d.Write(tmp[0 : 64+56-tc%64]) + } + + // Length in bits. + tc <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(tc >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var digest [Size]byte + for i, s := range d.s { + digest[i*4] = byte(s) + digest[i*4+1] = byte(s >> 8) + digest[i*4+2] = byte(s >> 16) + digest[i*4+3] = byte(s >> 24) + } + + return append(in, digest[:]...) +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go new file mode 100644 index 00000000..e0edc02f --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RIPEMD-160 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. 
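A small sketch (not part of the vendored sources) of the Merkle-Damgård length padding performed by Sum above: append 0x80, zero-fill until the running length is 56 mod 64, then append the bit count as a 64-bit little-endian integer, so the padded message is a whole number of 64-byte blocks.

package main

import "fmt"

// pad returns the padding Sum would append after msgLen message bytes.
func pad(msgLen uint64) []byte {
	p := []byte{0x80}
	for (msgLen+uint64(len(p)))%64 != 56 {
		p = append(p, 0)
	}
	bitLen := msgLen << 3
	for i := uint(0); i < 8; i++ {
		p = append(p, byte(bitLen>>(8*i))) // little-endian bit count
	}
	return p
}

func main() {
	// A 3-byte message ("abc") pads out to exactly one 64-byte block.
	fmt.Println((3+len(pad(3)))%64 == 0) // true
}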
+ +package ripemd160 + +import ( + "math/bits" +) + +// work buffer indices and roll amounts for one line +var _n = [80]uint{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, + 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, + 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, + 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, +} + +var _r = [80]uint{ + 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, + 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, + 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, + 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, + 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, +} + +// same for the other parallel one +var n_ = [80]uint{ + 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, + 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, + 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, + 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, + 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, +} + +var r_ = [80]uint{ + 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, + 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, + 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, + 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, + 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, +} + +func _Block(md *digest, p []byte) int { + n := 0 + var x [16]uint32 + var alpha, beta uint32 + for len(p) >= BlockSize { + a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] + aa, bb, cc, dd, ee := a, b, c, d, e + j := 0 + for i := 0; i < 16; i++ { + x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // round 1 + i := 0 + for i < 16 { + alpha = a + (b ^ c ^ d) + x[_n[i]] + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 2 + for i < 32 { + alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 3 + for i < 48 { + alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 4 + for i < 64 { + alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 5 + for i < 80 { + alpha = 
a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // combine results + dd += c + md.s[1] + md.s[1] = md.s[2] + d + ee + md.s[2] = md.s[3] + e + aa + md.s[3] = md.s[4] + a + bb + md.s[4] = md.s[0] + b + cc + md.s[0] = dd + + p = p[BlockSize:] + n += BlockSize + } + return n +} diff --git a/vendor/golang.org/x/crypto/twofish/twofish.go b/vendor/golang.org/x/crypto/twofish/twofish.go new file mode 100644 index 00000000..e4eeae17 --- /dev/null +++ b/vendor/golang.org/x/crypto/twofish/twofish.go @@ -0,0 +1,341 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package twofish implements Bruce Schneier's Twofish encryption algorithm. +// +// Deprecated: Twofish is a legacy cipher and should not be used for new +// applications. Also, this package does not and will not provide an optimized +// implementation. Instead, use AES (from crypto/aes, if necessary in an AEAD +// mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package twofish // import "golang.org/x/crypto/twofish" + +// Twofish is defined in https://www.schneier.com/paper-twofish-paper.pdf [TWOFISH] + +// This code is a port of the LibTom C implementation. +// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt. +// LibTomCrypt is free for all purposes under the public domain. +// It was heavily inspired by the go blowfish package. + +import ( + "math/bits" + "strconv" +) + +// BlockSize is the constant block size of Twofish. +const BlockSize = 16 + +const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2 +const rsPolynomial = 0x14d // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3 + +// A Cipher is an instance of Twofish encryption using a particular key. +type Cipher struct { + s [4][256]uint32 + k [40]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/twofish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Twofish key, 16, 24 or 32 bytes. +func NewCipher(key []byte) (*Cipher, error) { + keylen := len(key) + + if keylen != 16 && keylen != 24 && keylen != 32 { + return nil, KeySizeError(keylen) + } + + // k is the number of 64 bit words in key + k := keylen / 8 + + // Create the S[..] words + var S [4 * 4]byte + for i := 0; i < k; i++ { + // Computes [y0 y1 y2 y3] = rs . 
[x0 x1 x2 x3 x4 x5 x6 x7] + for j, rsRow := range rs { + for k, rsVal := range rsRow { + S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial) + } + } + } + + // Calculate subkeys + c := new(Cipher) + var tmp [4]byte + for i := byte(0); i < 20; i++ { + // A = h(p * 2x, Me) + for j := range tmp { + tmp[j] = 2 * i + } + A := h(tmp[:], key, 0) + + // B = rolc(h(p * (2x + 1), Mo), 8) + for j := range tmp { + tmp[j] = 2*i + 1 + } + B := h(tmp[:], key, 1) + B = bits.RotateLeft32(B, 8) + + c.k[2*i] = A + B + + // K[2i+1] = (A + 2B) <<< 9 + c.k[2*i+1] = bits.RotateLeft32(2*B+A, 9) + } + + // Calculate sboxes + switch k { + case 2: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3) + } + case 3: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3) + } + default: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3) + } + } + + return c, nil +} + +// BlockSize returns the Twofish block size, 16 bytes. +func (c *Cipher) BlockSize() int { return BlockSize } + +// store32l stores src in dst in little-endian form. +func store32l(dst []byte, src uint32) { + dst[0] = byte(src) + dst[1] = byte(src >> 8) + dst[2] = byte(src >> 16) + dst[3] = byte(src >> 24) + return +} + +// load32l reads a little-endian uint32 from src. +func load32l(src []byte) uint32 { + return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 +} + +// The RS matrix. 
See [TWOFISH] 4.3 +var rs = [4][8]byte{ + {0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E}, + {0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5}, + {0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19}, + {0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03}, +} + +// sbox tables +var sbox = [2][256]byte{ + { + 0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38, + 0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48, + 0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82, + 0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61, + 0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1, + 0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7, + 0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71, + 0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7, + 0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90, + 0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef, + 0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64, + 0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a, + 0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d, + 0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, + 0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4, + 0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0, + }, + { + 0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b, + 0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f, + 0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5, + 0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51, + 0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c, + 0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8, + 0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2, + 0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17, + 0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e, + 0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9, + 0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48, + 0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64, + 0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69, + 0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc, + 0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9, + 0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91, + }, +} + +// gfMult returns a·b in GF(2^8)/p +func gfMult(a, b byte, p uint32) byte { + B := [2]uint32{0, 
uint32(b)} + P := [2]uint32{0, p} + var result uint32 + + // branchless GF multiplier + for i := 0; i < 7; i++ { + result ^= B[a&1] + a >>= 1 + B[1] = P[B[1]>>7] ^ (B[1] << 1) + } + result ^= B[a&1] + return byte(result) +} + +// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0] +func mdsColumnMult(in byte, col int) uint32 { + mul01 := in + mul5B := gfMult(in, 0x5B, mdsPolynomial) + mulEF := gfMult(in, 0xEF, mdsPolynomial) + + switch col { + case 0: + return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24 + case 1: + return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24 + case 2: + return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24 + case 3: + return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24 + } + + panic("unreachable") +} + +// h implements the S-box generation function. See [TWOFISH] 4.3.5 +func h(in, key []byte, offset int) uint32 { + var y [4]byte + for x := range y { + y[x] = in[x] + } + switch len(key) / 8 { + case 4: + y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0] + y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1] + y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2] + y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3] + fallthrough + case 3: + y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0] + y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1] + y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2] + y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3] + fallthrough + case 2: + y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]] + y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]] + y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]] + y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]] + } + // [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3] + var mdsMult uint32 + for i := range y { + mdsMult ^= mdsColumnMult(y[i], i) + } + return mdsMult +} + +// Encrypt encrypts a 16-byte block from src to dst, which may overlap. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + S1 := c.s[0] + S2 := c.s[1] + S3 := c.s[2] + S4 := c.s[3] + + // Load input + ia := load32l(src[0:4]) + ib := load32l(src[4:8]) + ic := load32l(src[8:12]) + id := load32l(src[12:16]) + + // Pre-whitening + ia ^= c.k[0] + ib ^= c.k[1] + ic ^= c.k[2] + id ^= c.k[3] + + for i := 0; i < 8; i++ { + k := c.k[8+i*4 : 12+i*4] + t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)] + t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2 + ic = bits.RotateLeft32(ic^(t1+k[0]), -1) + id = bits.RotateLeft32(id, 1) ^ (t2 + t1 + k[1]) + + t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)] + t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2 + ia = bits.RotateLeft32(ia^(t1+k[2]), -1) + ib = bits.RotateLeft32(ib, 1) ^ (t2 + t1 + k[3]) + } + + // Output with "undo last swap" + ta := ic ^ c.k[4] + tb := id ^ c.k[5] + tc := ia ^ c.k[6] + td := ib ^ c.k[7] + + store32l(dst[0:4], ta) + store32l(dst[4:8], tb) + store32l(dst[8:12], tc) + store32l(dst[12:16], td) +} + +// Decrypt decrypts a 16-byte block from src to dst, which may overlap. 
+func (c *Cipher) Decrypt(dst, src []byte) { + S1 := c.s[0] + S2 := c.s[1] + S3 := c.s[2] + S4 := c.s[3] + + // Load input + ta := load32l(src[0:4]) + tb := load32l(src[4:8]) + tc := load32l(src[8:12]) + td := load32l(src[12:16]) + + // Undo undo final swap + ia := tc ^ c.k[6] + ib := td ^ c.k[7] + ic := ta ^ c.k[4] + id := tb ^ c.k[5] + + for i := 8; i > 0; i-- { + k := c.k[4+i*4 : 8+i*4] + t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)] + t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2 + ia = bits.RotateLeft32(ia, 1) ^ (t1 + k[2]) + ib = bits.RotateLeft32(ib^(t2+t1+k[3]), -1) + + t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)] + t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2 + ic = bits.RotateLeft32(ic, 1) ^ (t1 + k[0]) + id = bits.RotateLeft32(id^(t2+t1+k[1]), -1) + } + + // Undo pre-whitening + ia ^= c.k[0] + ib ^= c.k[1] + ic ^= c.k[2] + id ^= c.k[3] + + store32l(dst[0:4], ia) + store32l(dst[4:8], ib) + store32l(dst[8:12], ic) + store32l(dst[12:16], id) +} diff --git a/vendor/golang.org/x/crypto/xts/xts.go b/vendor/golang.org/x/crypto/xts/xts.go new file mode 100644 index 00000000..8c16a830 --- /dev/null +++ b/vendor/golang.org/x/crypto/xts/xts.go @@ -0,0 +1,164 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xts implements the XTS cipher mode as specified in IEEE P1619/D16. +// +// XTS mode is typically used for disk encryption, which presents a number of +// novel problems that make more common modes inapplicable. The disk is +// conceptually an array of sectors and we must be able to encrypt and decrypt +// a sector in isolation. However, an attacker must not be able to transpose +// two sectors of plaintext by transposing their ciphertext. +// +// XTS wraps a block cipher with Rogaway's XEX mode in order to build a +// tweakable block cipher. This allows each sector to have a unique tweak and +// effectively create a unique key for each sector. +// +// XTS does not provide any authentication. An attacker can manipulate the +// ciphertext and randomise a block (16 bytes) of the plaintext. This package +// does not implement ciphertext-stealing so sectors must be a multiple of 16 +// bytes. +// +// Note that XTS is usually not appropriate for any use besides disk encryption. +// Most users should use an AEAD mode like GCM (from crypto/cipher.NewGCM) instead. +package xts // import "golang.org/x/crypto/xts" + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "sync" + + "golang.org/x/crypto/internal/alias" +) + +// Cipher contains an expanded key structure. It is safe for concurrent use if +// the underlying block cipher is safe for concurrent use. +type Cipher struct { + k1, k2 cipher.Block +} + +// blockSize is the block size that the underlying cipher must have. XTS is +// only defined for 16-byte ciphers. +const blockSize = 16 + +var tweakPool = sync.Pool{ + New: func() interface{} { + return new([blockSize]byte) + }, +} + +// NewCipher creates a Cipher given a function for creating the underlying +// block cipher (which must have a block size of 16 bytes). The key must be +// twice the length of the underlying cipher's key. 
+func NewCipher(cipherFunc func([]byte) (cipher.Block, error), key []byte) (c *Cipher, err error) { + c = new(Cipher) + if c.k1, err = cipherFunc(key[:len(key)/2]); err != nil { + return + } + c.k2, err = cipherFunc(key[len(key)/2:]) + + if c.k1.BlockSize() != blockSize { + err = errors.New("xts: cipher does not have a block size of 16") + } + + return +} + +// Encrypt encrypts a sector of plaintext and puts the result into ciphertext. +// Plaintext and ciphertext must overlap entirely or not at all. +// Sectors must be a multiple of 16 bytes and less than 2²ⴠbytes. +func (c *Cipher) Encrypt(ciphertext, plaintext []byte, sectorNum uint64) { + if len(ciphertext) < len(plaintext) { + panic("xts: ciphertext is smaller than plaintext") + } + if len(plaintext)%blockSize != 0 { + panic("xts: plaintext is not a multiple of the block size") + } + if alias.InexactOverlap(ciphertext[:len(plaintext)], plaintext) { + panic("xts: invalid buffer overlap") + } + + tweak := tweakPool.Get().(*[blockSize]byte) + for i := range tweak { + tweak[i] = 0 + } + binary.LittleEndian.PutUint64(tweak[:8], sectorNum) + + c.k2.Encrypt(tweak[:], tweak[:]) + + for len(plaintext) > 0 { + for j := range tweak { + ciphertext[j] = plaintext[j] ^ tweak[j] + } + c.k1.Encrypt(ciphertext, ciphertext) + for j := range tweak { + ciphertext[j] ^= tweak[j] + } + plaintext = plaintext[blockSize:] + ciphertext = ciphertext[blockSize:] + + mul2(tweak) + } + + tweakPool.Put(tweak) +} + +// Decrypt decrypts a sector of ciphertext and puts the result into plaintext. +// Plaintext and ciphertext must overlap entirely or not at all. +// Sectors must be a multiple of 16 bytes and less than 2²ⴠbytes. +func (c *Cipher) Decrypt(plaintext, ciphertext []byte, sectorNum uint64) { + if len(plaintext) < len(ciphertext) { + panic("xts: plaintext is smaller than ciphertext") + } + if len(ciphertext)%blockSize != 0 { + panic("xts: ciphertext is not a multiple of the block size") + } + if alias.InexactOverlap(plaintext[:len(ciphertext)], ciphertext) { + panic("xts: invalid buffer overlap") + } + + tweak := tweakPool.Get().(*[blockSize]byte) + for i := range tweak { + tweak[i] = 0 + } + binary.LittleEndian.PutUint64(tweak[:8], sectorNum) + + c.k2.Encrypt(tweak[:], tweak[:]) + + for len(ciphertext) > 0 { + for j := range tweak { + plaintext[j] = ciphertext[j] ^ tweak[j] + } + c.k1.Decrypt(plaintext, plaintext) + for j := range tweak { + plaintext[j] ^= tweak[j] + } + plaintext = plaintext[blockSize:] + ciphertext = ciphertext[blockSize:] + + mul2(tweak) + } + + tweakPool.Put(tweak) +} + +// mul2 multiplies tweak by 2 in GF(2¹²â¸) with an irreducible polynomial of +// x¹²⸠+ xâ· + x² + x + 1. +func mul2(tweak *[blockSize]byte) { + var carryIn byte + for j := range tweak { + carryOut := tweak[j] >> 7 + tweak[j] = (tweak[j] << 1) + carryIn + carryIn = carryOut + } + if carryIn != 0 { + // If we have a carry bit then we need to subtract a multiple + // of the irreducible polynomial (x¹²⸠+ xâ· + x² + x + 1). + // By dropping the carry bit, we're subtracting the x^128 term + // so all that remains is to subtract xâ· + x² + x + 1. + // Subtraction (and addition) in this representation is just + // XOR. + tweak[0] ^= 1<<7 | 1<<2 | 1<<1 | 1 + } +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8d..e6f55cbd 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. 
The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..43557ab7 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and @@ -1565,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1577,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. 
+ // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b03..00000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab..00000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b3..00000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa81..00000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7..00000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c9..00000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984f..3b9f06b9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. 
+ if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 02c88b6b..ce2e8b40 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2549,7 +2550,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2669,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2689,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2700,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2915,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2985,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. @@ -3187,6 +3171,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 00000000..61075bd1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. + // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. 
+ newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// condBroadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// condWait is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. 
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c..ce375c8c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. 
StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) 
(*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1018,7 +1093,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 
@@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,10 +2015,26 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() } } +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + var errNilRequestURL = errors.New("http2: Request.URI is nil") // requires cc.wmu be held. @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } @@ -3201,3 +3386,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace 
*httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338d..712f1ad8 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85f..7b371788 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698ce..cc6a892a 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1..40e74bb3 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef4..c6c2bf10 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba7..76789393 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bc..0600cd2a 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701ead..2fb768ef 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778..5ff05fe1 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b533..0f25e84c 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904..8a75b966 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc..fa45bb90 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b18efb74..948a3ee6 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -4,6 +4,9 @@ // Package errgroup provides synchronization, error propagation, and Context // cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. package errgroup import ( diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdf..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. 
-// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b4..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4b..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6202638b..fdcaa974 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -283,10 +284,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -295,14 +292,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -319,10 +308,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. +#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. 
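Editor's note, not part of the patch: the hunks above delete the golang.org/x/sys/execabs package outright, and the golang.org/x/tools hunks later in this patch swap their imports back to plain os/exec. The likely rationale, stated here as an assumption: since Go 1.19, os/exec itself reports exec.ErrDot when a bare command name would resolve to a binary in the current directory, which is the protection execabs existed to provide. A minimal sketch of that Go 1.19+ behavior ("sometool" is a placeholder command name, not from the patch):

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// On Go 1.19+, LookPath refuses a PATH result that resolves relative
	// to the current directory and reports exec.ErrDot instead.
	path, err := exec.LookPath("sometool") // placeholder command name
	if errors.Is(err, exec.ErrDot) {
		fmt.Println("refused: command resolved to the current directory")
		return
	}
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved to:", path)
}
```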
@@ -582,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || @@ -603,6 +605,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc6993..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// 
FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// path is a non-empty path for the specified key. +// atfd is a file descriptor from which to start the lookup, or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FsconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is a file descriptor to be assigned to the specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon.
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index c73cfe2f..36bf8399 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2127,6 +2129,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + NF_NAT_RANGE_NETMAP = 0x40 + NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2411,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2615,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2859,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3021,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821c..42ff8c3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e411..dca43600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c6398556..5cca668a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e2..d8cae6d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09..28e39afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 
SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642..cd66e92c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d7..c1595eba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0..ee9456b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84..8cfca81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d..60b0deb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc7..f90aa728 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e..ba9e0150 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813e..07cdfd6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4e..2f1dd214 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994d..f40519d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d271..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a1d06159..9dc42410 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 5b2a7409..0d3a0751 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index f6eda134..c39f7776 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 55df20ae..57571d07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8c1155cb..e62963e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 7cc80c58..00831354 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 0688737f..79029ed5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbd..0cc3ce49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc250..856d92d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go 
index 974bf246..8d467094 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e..edc17324 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77..445eba20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362e..adba01bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4f..014c4e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca8..ccc97d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d3..ec2b64a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e374..21a839e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 
SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c..c11121ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e59..909b631f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195..e49bed16 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943b..66017d2d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc5..47bab18d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399f..eff6bcde 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 
- Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -832,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1546,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1561,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1608,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1645,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1666,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + 
IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1689,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1722,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1736,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1748,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1777,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1792,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1850,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1881,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -3399,7 +3463,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4247,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5199,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 
+5612,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad1925..d4577a42 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 47dc5796..6395a031 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } @@ -194,6 +193,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 146a1f01..e8791c82 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -342,6 +342,7 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -2988,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) { return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := 
syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 0454cdd7..333676b7 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -13,16 +13,17 @@ import ( "golang.org/x/tools/internal/gocommand" ) -var debug = false - func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { - if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? inv.Verb = "env" inv.Args = []string{"GOARCH"} @@ -32,8 +33,12 @@ func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdR } goarch = strings.TrimSpace(envout.String()) compiler = "gc" - } else { + } else if friendlyErr != nil { return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr } } else { fields := strings.Fields(stdout.String()) diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index a7a8f73e..b2a0b7c6 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,32 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. + +Load may be used in Go projects that use alternative build systems, by +installing an appropriate "driver" program for the build system and +specifying its location in the GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. 
+The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +they identify. +(See driverRequest and driverResponse types for the JSON +schema used by the protocol. +Though the protocol is supported, these types are currently unexported; +see #64608 for a proposal to publish them.) + +Regardless of driver, all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -64,7 +84,7 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. */ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7..7db1d129 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -12,8 +12,8 @@ import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 1f1eade0..cd375fbc 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -11,6 +11,7 @@ import ( "fmt" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -20,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -208,62 +208,6 @@ extractQueries: } } - // Only use go/packages' overlay processing if we're using a Go version - // below 1.16. Otherwise, go list handles it. - if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } - - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. - if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{{ - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }}, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) - } - } - } - } - } - // Add root for any package that matches a pattern. This applies only to - // packages that are modified by overlays, since they are not added as - // roots automatically. 
- for _, pattern := range restPatterns { - match := matchPattern(pattern) - for _, pkgID := range modifiedPkgs { - pkg, ok := response.seenPackages[pkgID] - if !ok { - continue - } - if match(pkg.PkgPath) { - response.addRoot(pkg.ID) - } - } - } - } - sizeswg.Wait() if sizeserr != nil { return nil, sizeserr @@ -271,24 +215,6 @@ extractQueries: return response.dr, nil } -func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } - dr, err := state.createDriverResponse(pkgs...) - if err != nil { - return err - } - for _, pkg := range dr.Packages { - response.addPackage(pkg) - } - _, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return err - } - return state.addNeededOverlayPackages(response, needPkgs) -} - func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 9576b472..d823c474 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -6,314 +6,11 @@ package packages import ( "encoding/json" - "fmt" - "go/parser" - "go/token" - "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" "golang.org/x/tools/internal/gocommand" ) -// processGolistOverlay provides rudimentary support for adding -// files that don't exist on disk to an overlay. The results can be -// sometimes incorrect. -// TODO(matloob): Handle unsupported cases, including the following: -// - determining the correct package to add given a new import path -func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { - havePkgs := make(map[string]string) // importPath -> non-test package ID - needPkgsSet := make(map[string]bool) - modifiedPkgsSet := make(map[string]bool) - - pkgOfDir := make(map[string][]*Package) - for _, pkg := range response.dr.Packages { - // This is an approximation of import path to id. This can be - // wrong for tests, vendored packages, and a number of other cases. - havePkgs[pkg.PkgPath] = pkg.ID - dir, err := commonDir(pkg.GoFiles) - if err != nil { - return nil, nil, err - } - if dir != "" { - pkgOfDir[dir] = append(pkgOfDir[dir], pkg) - } - } - - // If no new imports are added, it is safe to avoid loading any needPkgs. - // Otherwise, it's hard to tell which package is actually being loaded - // (due to vendoring) and whether any modified package will show up - // in the transitive set of dependencies (because new imports are added, - // potentially modifying the transitive set of dependencies). - var overlayAddsImports bool - - // If both a package and its test package are created by the overlay, we - // need the real package first. Process all non-test files before test - // files, and make the whole process deterministic while we're at it. - var overlayFiles []string - for opath := range state.cfg.Overlay { - overlayFiles = append(overlayFiles, opath) - } - sort.Slice(overlayFiles, func(i, j int) bool { - iTest := strings.HasSuffix(overlayFiles[i], "_test.go") - jTest := strings.HasSuffix(overlayFiles[j], "_test.go") - if iTest != jTest { - return !iTest // non-tests are before tests. 
- } - return overlayFiles[i] < overlayFiles[j] - }) - for _, opath := range overlayFiles { - contents := state.cfg.Overlay[opath] - base := filepath.Base(opath) - dir := filepath.Dir(opath) - var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant - var testVariantOf *Package // if opath is a test file, this is the package it is testing - var fileExists bool - isTestFile := strings.HasSuffix(opath, "_test.go") - pkgName, ok := extractPackageName(opath, contents) - if !ok { - // Don't bother adding a file that doesn't even have a parsable package statement - // to the overlay. - continue - } - // If all the overlay files belong to a different package, change the - // package name to that package. - maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) - nextPackage: - for _, p := range response.dr.Packages { - if pkgName != p.Name && p.ID != "command-line-arguments" { - continue - } - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue - } - // Make sure to capture information on the package's test variant, if needed. - if isTestFile && !hasTestFiles(p) { - // TODO(matloob): Are there packages other than the 'production' variant - // of a package that this can match? This shouldn't match the test main package - // because the file is generated in another directory. - testVariantOf = p - continue nextPackage - } else if !isTestFile && hasTestFiles(p) { - // We're examining a test variant, but the overlaid file is - // a non-test file. Because the overlay implementation - // (currently) only adds a file to one package, skip this - // package, so that we can add the file to the production - // variant of the package. (https://golang.org/issue/36857 - // tracks handling overlays on both the production and test - // variant of a package). - continue nextPackage - } - if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // We have already seen the production version of the - // for which p is a test variant. - if hasTestFiles(p) { - testVariantOf = pkg - } - } - pkg = p - if filepath.Base(f) == base { - fileExists = true - } - } - } - // The overlay could have included an entirely new package or an - // ad-hoc package. An ad-hoc package is one that we have manually - // constructed from inadequate `go list` results for a file= query. - // It will have the ID command-line-arguments. - if pkg == nil || pkg.ID == "command-line-arguments" { - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. - pkgPath, ok, err := state.getPkgPath(dir) - if err != nil { - return nil, nil, err - } - if !ok { - break - } - var forTest string // only set for x tests - isXTest := strings.HasSuffix(pkgName, "_test") - if isXTest { - forTest = pkgPath - pkgPath += "_test" - } - id := pkgPath - if isTestFile { - if isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) - } else { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - } - if pkg != nil { - // TODO(rstambler): We should change the package's path and ID - // here. The only issue is that this messes with the roots. - } else { - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break - } - } - // Otherwise, create a new package. 
- if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), - } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} - } - } - if isXTest { - pkg.forTest = forTest - } - } - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - // TODO(rstambler): If the package is an x test and the import has - // a test variant, make sure to replace it. - if _, found := pkg.Imports[imp]; found { - continue - } - overlayAddsImports = true - id, ok := havePkgs[imp] - if !ok { - var err error - id, err = state.resolveImport(dir, imp) - if err != nil { - return nil, nil, err - } - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as well. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} - } - } - } - - // toPkgPath guesses the package path given the id. - toPkgPath := func(sourceDir, id string) (string, error) { - if i := strings.IndexByte(id, ' '); i >= 0 { - return state.resolveImport(sourceDir, id[:i]) - } - return state.resolveImport(sourceDir, id) - } - - // Now that new packages have been created, do another pass to determine - // the new set of missing packages. - for _, pkg := range response.dr.Packages { - for _, imp := range pkg.Imports { - if len(pkg.GoFiles) == 0 { - return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) - } - pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) - if err != nil { - return nil, nil, err - } - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true - } - } - } - - if overlayAddsImports { - needPkgs = make([]string, 0, len(needPkgsSet)) - for pkg := range needPkgsSet { - needPkgs = append(needPkgs, pkg) - } - } - modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) - for pkg := range modifiedPkgsSet { - modifiedPkgs = append(modifiedPkgs, pkg) - } - return modifiedPkgs, needPkgs, err -} - -// resolveImport finds the ID of a package given its import path. -// In particular, it will find the right vendored copy when in GOPATH mode. 
-func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { - env, err := state.getEnv() - if err != nil { - return "", err - } - if env["GOMOD"] != "" { - return importPath, nil - } - - searchDir := sourceDir - for { - vendorDir := filepath.Join(searchDir, "vendor") - exists, ok := state.vendorDirs[vendorDir] - if !ok { - info, err := os.Stat(vendorDir) - exists = err == nil && info.IsDir() - state.vendorDirs[vendorDir] = exists - } - - if exists { - vendoredPath := filepath.Join(vendorDir, importPath) - if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { - // We should probably check for .go files here, but shame on anyone who fools us. - path, ok, err := state.getPkgPath(vendoredPath) - if err != nil { - return "", err - } - if ok { - return path, nil - } - } - } - - // We know we've hit the top of the filesystem when we Dir / and get /, - // or C:\ and get C:\, etc. - next := filepath.Dir(searchDir) - if next == searchDir { - break - } - searchDir = next - } - return importPath, nil -} - -func hasTestFiles(p *Package) bool { - for _, f := range p.GoFiles { - if strings.HasSuffix(f, "_test.go") { - return true - } - } - return false -} - // determineRootDirs returns a mapping from absolute directories that could // contain code to their corresponding import path prefixes. func (state *golistState) determineRootDirs() (map[string]string, error) { @@ -384,192 +81,3 @@ func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { } return m, nil } - -func extractImports(filename string, contents []byte) ([]string, error) { - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? - if err != nil { - return nil, err - } - var res []string - for _, imp := range f.Imports { - quotedPath := imp.Path.Value - path, err := strconv.Unquote(quotedPath) - if err != nil { - return nil, err - } - res = append(res, path) - } - return res, nil -} - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} - -// commonDir returns the directory that all files are in, "" if files is empty, -// or an error if they aren't in the same directory. 
-func commonDir(files []string) (string, error) { - seen := make(map[string]bool) - for _, f := range files { - seen[filepath.Dir(f)] = true - } - if len(seen) > 1 { - return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) - } - for k := range seen { - // seen has only one element; return it. - return k, nil - } - return "", nil // no files -} - -// It is possible that the files in the disk directory dir have a different package -// name from newName, which is deduced from the overlays. If they all have a different -// package name, and they all have the same package name, then that name becomes -// the package name. -// It returns true if it changes the package name, false otherwise. -func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { - names := make(map[string]int) - for _, p := range pkgsOfDir { - names[p.Name]++ - } - if len(names) != 1 { - // some files are in different packages - return - } - var oldName string - for k := range names { - oldName = k - } - if newName == oldName { - return - } - // We might have a case where all of the package names in the directory are - // the same, but the overlay file is for an x test, which belongs to its - // own package. If the x test does not yet exist on disk, we may not yet - // have its package name on disk, but we should not rename the packages. - // - // We use a heuristic to determine if this file belongs to an x test: - // The test file should have a package name whose package name has a _test - // suffix or looks like "newName_test". - maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") - if isTestFile && maybeXTest { - return - } - for _, p := range pkgsOfDir { - p.Name = newName - } -} - -// This function is copy-pasted from -// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. -// It should be deleted when we remove support for overlays from go/packages. -// -// NOTE: This does not handle any ./... or ./ style queries, as this function -// doesn't know the working directory. -// -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separated pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. 
- // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index ece0e7c6..81e9e6a7 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -27,8 +27,8 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -258,31 +258,52 @@ type driverResponse struct { // proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { - l := newLoader(cfg) - response, err := defaultDriver(&l.Config, patterns...) + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) if err != nil { return nil, err } - l.sizes = types.SizesFor(response.Compiler, response.Arch) - return l.refine(response) + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. + // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. // It will try to request to an external driver, if one exists. 
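// Context for the sizes fallback added to Load above: types.SizesFor returns
// nil when it has no size data for a compiler/GOARCH pair, which is why the
// external-driver path retries with "gc"/"amd64". A minimal sketch of that
// behavior, using only the standard go/types API:
package main

import (
	"fmt"
	"go/types"
	"runtime"
)

func main() {
	sizes := types.SizesFor("gc", runtime.GOARCH)
	if sizes == nil {
		// gccgo-only architecture: fall back, as the hunk above does.
		sizes = types.SizesFor("gc", "amd64")
	}
	fmt.Println(sizes.Sizeof(types.Typ[types.Int64])) // typically 8
}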
If there's // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - driver := findExternalDriver(cfg) - if driver == nil { - driver = goListDriver - } - response, err := driver(cfg, patterns...) - if err != nil { - return response, err - } else if response.NotHandled { - return goListDriver(cfg, patterns...) +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { + if driver := findExternalDriver(cfg); driver != nil { + response, err := driver(cfg, patterns...) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) } - return response, nil + + response, err := goListDriver(cfg, patterns...) + return response, false, err } // A Package describes a loaded Go package. @@ -411,12 +432,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -553,7 +568,7 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes + sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue parseCacheMu sync.Mutex exportMu sync.Mutex // enforces mutual exclusion of exportdata operations @@ -678,39 +693,38 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } } - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. - // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. - var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - var srcPkgs []*loaderPackage - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - // If NeedImports isn't set, the imports fields will all be zeroed out. - if ld.Mode&NeedImports != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. 
+ // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports lpkg.Imports = make(map[string]*Package, len(stubs)) for importPath, ipkg := range stubs { var importErr error @@ -734,40 +748,39 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } lpkg.Imports[importPath] = imp.Package } - } - if lpkg.needsrc { - srcPkgs = append(srcPkgs, lpkg) - } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc - } + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } - if ld.Mode&NeedImports == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc } - } else { + // For each initial package, create its import DAG. for _, lpkg := range initial { visit(lpkg) } - } - if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil } } + // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). 
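// The white/grey/black scheme in the hunk above is ordinary tri-color
// depth-first search. A self-contained sketch on a toy string-keyed graph
// (node names are illustrative). Note the loader itself records a grey
// dependency as an import error instead of recursing into it, so its visit
// treats a grey node as an internal error; this sketch simply reports the
// back edge directly.
package main

import "fmt"

const (
	white = 0 // new
	grey  = 1 // in progress
	black = 2 // complete
)

func main() {
	imports := map[string][]string{
		"a": {"b", "c"},
		"b": {"c"},
		"c": {"a"}, // back edge: cycle a -> b -> c -> a
	}
	color := map[string]int{}
	var visit func(n string)
	visit = func(n string) {
		switch color[n] {
		case black:
			return // already materialized
		case grey:
			fmt.Println("import cycle through", n)
			return
		}
		color[n] = grey
		for _, dep := range imports[n] {
			visit(dep)
		}
		color[n] = black
	}
	visit("a")
}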
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { @@ -1001,10 +1014,11 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1042,7 +1056,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, Error: appendError, - Sizes: ld.sizes, + Sizes: ld.sizes, // may be nil } if lpkg.Module != nil && lpkg.Module.GoVersion != "" { typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index fa5834ba..11d5c8c3 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -26,13 +26,10 @@ package objectpath import ( "fmt" "go/types" - "sort" "strconv" "strings" - _ "unsafe" "golang.org/x/tools/internal/typeparams" - "golang.org/x/tools/internal/typesinternal" ) // A Path is an opaque name that identifies a types.Object @@ -123,20 +120,7 @@ func For(obj types.Object) (Path, error) { // An Encoder amortizes the cost of encoding the paths of multiple objects. // The zero value of an Encoder is ready to use. type Encoder struct { - scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() - skipMethodSorting bool -} - -// Expose back doors so that gopls can avoid method sorting, which can dominate -// analysis on certain repositories. -// -// TODO(golang/go#61443): remove this. -func init() { - typesinternal.SkipEncoderMethodSorting = func(enc interface{}) { - enc.(*Encoder).skipMethodSorting = true - } - typesinternal.ObjectpathObject = object + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects } // For returns the path to an object relative to its package, @@ -239,7 +223,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := obj.Type().(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -299,7 +283,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -328,31 +312,18 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Inspect declared methods of defined types. if T, ok := o.Type().(*types.Named); ok { path = append(path, opType) - if !enc.skipMethodSorting { - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. 
- for i, m := range enc.namedMethods(T) { - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method } - } else { - // This branch must match the logic in the branch above, using go/types - // APIs without sorting. - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil } } } @@ -448,22 +419,13 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { path = append(path, name...) path = append(path, opType) - if !enc.skipMethodSorting { - for i, m := range enc.namedMethods(named) { - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - } else { - // This branch must match the logic of the branch above, using go/types - // APIs without sorting. - for i := 0; i < named.NumMethods(); i++ { - m := named.Method(i) - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true } } @@ -500,7 +462,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -543,7 +505,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -563,7 +525,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -576,12 +538,7 @@ func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte // Object returns the object denoted by path p within the package pkg. func Object(pkg *types.Package, p Path) (types.Object, error) { - return object(pkg, string(p), false) -} - -// Note: the skipMethodSorting parameter must match the value of -// Encoder.skipMethodSorting used during encoding. 
-func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.Object, error) { + pathstr := string(p) if pathstr == "" { return nil, fmt.Errorf("empty path") } @@ -605,7 +562,7 @@ func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.O } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -707,7 +664,7 @@ func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.O t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } @@ -747,12 +704,7 @@ func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.O if index >= t.NumMethods() { return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) } - if skipMethodSorting { - obj = t.Method(index) - } else { - methods := namedMethods(t) // (unmemoized) - obj = methods[index] // Id-ordered - } + obj = t.Method(index) default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) @@ -779,33 +731,6 @@ func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.O return obj, nil // success } -// namedMethods returns the methods of a Named type in ascending Id order. -func namedMethods(named *types.Named) []*types.Func { - methods := make([]*types.Func, named.NumMethods()) - for i := range methods { - methods[i] = named.Method(i) - } - sort.Slice(methods, func(i, j int) bool { - return methods[i].Id() < methods[j].Id() - }) - return methods -} - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods -} - // scopeObjects is a memoization of scope objects. // Callers must not modify the result. func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 00000000..c0e8e731 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. 
+func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 6103dd71..2ee8c701 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -24,7 +24,6 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/tokeninternal" - "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -481,7 +480,7 @@ func (p *iexporter) doDecl(obj types.Object) { } // Function. - if typeparams.ForSignature(sig).Len() == 0 { + if sig.TypeParams().Len() == 0 { w.tag('F') } else { w.tag('G') @@ -494,7 +493,7 @@ func (p *iexporter) doDecl(obj types.Object) { // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) @@ -507,14 +506,14 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { + if tparam, ok := t.(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -535,17 +534,17 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { w.tag('T') } else { w.tag('U') } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } underlying := obj.Type().Underlying() @@ -565,7 +564,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -740,19 +739,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. 
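// Usage of the generic keys.Join helper added above: it yields a canonical
// (sorted, comma-separated) label for any string-like slice. The local copy
// of the function and the Pattern type below exist only to make this demo
// self-contained.
package main

import (
	"fmt"
	"sort"
	"strings"
)

func Join[S ~[]T, T ~string](s S) string {
	strs := make([]string, 0, len(s))
	for _, v := range s {
		strs = append(strs, string(v))
	}
	sort.Strings(strs)
	return strings.Join(strs, ",")
}

type Pattern string

func main() {
	fmt.Println(Join([]Pattern{"b", "a", "c"})) // prints "a,b,c"
}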
w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -868,7 +867,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -948,14 +947,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -973,7 +972,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. -func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 8e64cf64..9bde15e3 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -22,7 +22,6 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/typeparams" ) type intReader struct { @@ -321,7 +320,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { @@ -339,7 +338,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -549,7 +548,7 @@ func (r *importReader) obj(name string) { r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) case 'F', 'G': - var tparams []*typeparams.TypeParam + var tparams []*types.TypeParam if tag == 'G' { tparams = r.tparamList() } @@ -566,7 +565,7 @@ func (r *importReader) obj(name string) { r.declare(obj) if tag == 'U' { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -583,12 +582,12 @@ func (r *importReader) obj(name string) { // typeparams being used in the method sig/body). 
base := baseType(recv.Type()) assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + targs := base.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = targs.At(i).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -606,7 +605,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. @@ -622,7 +621,7 @@ func (r *importReader) obj(name string) { if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -966,7 +965,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) // Workaround for golang/go#61561. See the doc for instanceList for details. r.p.instanceList = append(r.p.instanceList, t) @@ -976,11 +975,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -1008,23 +1007,23 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. 
- xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = r.typ().(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 53cf66da..55312522 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -13,6 +13,7 @@ import ( "io" "log" "os" + "os/exec" "reflect" "regexp" "runtime" @@ -21,8 +22,6 @@ import ( "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/keys" "golang.org/x/tools/internal/event/label" @@ -85,6 +84,7 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. +// Postcondition: both error results have same nilness. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) defer done() @@ -95,23 +95,24 @@ func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) // If we encounter a load concurrency error, we need to retry serially. - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) } - event.Error(ctx, "Load concurrency error, will retry serially", err) - // Run serially by calling runPiped. - stdout.Reset() - stderr.Reset() - friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { // Wait for 1 worker to become available. select { case <-ctx.Done(): - return nil, nil, nil, ctx.Err() + return nil, nil, ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: defer func() { <-runner.inFlight }() } @@ -121,6 +122,7 @@ func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { // Make sure the runner is always initialized. runner.initialize() @@ -129,7 +131,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // runPiped commands. select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.serialized <- struct{}{}: defer func() { <-runner.serialized }() } @@ -139,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde for i := 0; i < maxInFlight; i++ { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: // Make sure we always "return" any workers we took. 
defer func() { <-runner.inFlight }() @@ -172,6 +174,7 @@ type Invocation struct { Logf func(format string, args ...interface{}) } +// Postcondition: both error results have same nilness. func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { rawError = i.run(ctx, stdout, stderr) if rawError != nil { diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f..44719de1 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index d0d0649f..cdab9885 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -42,7 +42,7 @@ func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Ex switch e := n.(type) { case *ast.IndexExpr: return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: + case *ast.IndexListExpr: return e.X, e.Lbrack, e.Indices, e.Rbrack } return nil, token.NoPos, nil, token.NoPos @@ -63,7 +63,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke Rbrack: rbrack, } default: - return &IndexListExpr{ + return &ast.IndexListExpr{ X: x, Lbrack: lbrack, Indices: indices, @@ -74,7 +74,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke // IsTypeParam reports whether t is a type parameter. func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) + _, ok := t.(*types.TypeParam) return ok } @@ -100,11 +100,11 @@ func OriginMethod(fn *types.Func) *types.Func { // Receiver is a *types.Interface. return fn } - if ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { // Receiver base has no type parameters, so we can avoid the lookup below. return fn } - orig := NamedTypeOrigin(named) + orig := named.Origin() gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: @@ -157,7 +157,7 @@ func OriginMethod(fn *types.Func) *types.Func { // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. 
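// A runnable sketch of the technique GenericAssignableTo implements, using
// only public go/types API (the typeparams package itself is internal to
// x/tools, so it cannot be imported here). It instantiates both generic
// named types with V's own type parameters as arguments and then asks
// types.AssignableTo, mirroring the hunk that follows. The Container and
// Interface source is taken from the doc comment above.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"
)

const src = `package p

type Interface[T any] interface{ Accept(T) }

type Container[T any] struct{ Element T }

func (c Container[T]) Accept(t T) { c.Element = t }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	V := pkg.Scope().Lookup("Container").Type().(*types.Named)
	T := pkg.Scope().Lookup("Interface").Type().(*types.Named)

	// Instantiate both with V's own type parameters as the type arguments.
	ctxt := types.NewContext()
	var targs []types.Type
	for i := 0; i < V.TypeParams().Len(); i++ {
		targs = append(targs, V.TypeParams().At(i))
	}
	vinst, err := types.Instantiate(ctxt, V, targs, true)
	if err != nil {
		log.Fatal(err)
	}
	tinst, err := types.Instantiate(ctxt, T, targs, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(types.AssignableTo(vinst, tinst)) // true
}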
@@ -167,9 +167,9 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { return types.AssignableTo(V, T) } - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { return types.AssignableTo(V, T) } @@ -182,7 +182,7 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { // Minor optimization: ensure we share a context across the two // instantiations below. if ctxt == nil { - ctxt = NewContext() + ctxt = types.NewContext() } var targs []types.Type @@ -190,12 +190,12 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { targs = append(targs, vtparams.At(i)) } - vinst, err := Instantiate(ctxt, V, targs, true) + vinst, err := types.Instantiate(ctxt, V, targs, true) if err != nil { panic("type parameters should satisfy their own constraints") } - tinst, err := Instantiate(ctxt, T, targs, true) + tinst, err := types.Instantiate(ctxt, T, targs, true) if err != nil { return false } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 71248209..7ea8840e 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -108,15 +108,15 @@ func CoreType(T types.Type) types.Type { // // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { +func _NormalTerms(typ types.Type) ([]*types.Term, error) { switch typ := typ.(type) { - case *TypeParam: + case *types.TypeParam: return StructuralTerms(typ) - case *Union: + case *types.Union: return UnionTermSet(typ) case *types.Interface: return InterfaceTermSet(typ) default: - return []*Term{NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, typ)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d6714882..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. 
-const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 9c631b65..93c80fdc 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -60,7 +60,7 @@ var ErrEmptyTypeSet = errors.New("empty type set") // // StructuralTerms makes no guarantees about the order of terms, except that it // is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { constraint := tparam.Constraint() if constraint == nil { return nil, fmt.Errorf("%s has nil constraint", tparam) @@ -78,7 +78,7 @@ func StructuralTerms(tparam *TypeParam) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { return computeTermSet(iface) } @@ -88,11 +88,11 @@ func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func UnionTermSet(union *Union) ([]*Term, error) { +func UnionTermSet(union *types.Union) ([]*types.Term, error) { return computeTermSet(union) } -func computeTermSet(typ types.Type) ([]*Term, error) { +func computeTermSet(typ types.Type) ([]*types.Term, error) { tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) if err != nil { return nil, err @@ -103,9 +103,9 @@ func computeTermSet(typ types.Type) ([]*Term, error) { if tset.terms.isAll() { return nil, nil } - var terms []*Term + var terms []*types.Term for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) + terms = append(terms, types.NewTerm(term.tilde, term.typ)) } return terms, nil } @@ -162,7 +162,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in tset.terms = allTermlist for i := 0; i < u.NumEmbeddeds(); i++ { embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { + if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } tset2, err := computeTermSetInternal(embedded, seen, depth+1) @@ -171,7 +171,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in } tset.terms = tset.terms.intersect(tset2.terms) } - case *Union: + case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil for i := 0; i < u.Len(); i++ { @@ -184,7 +184,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, err } terms = tset2.terms - case *TypeParam, *Union: + case *types.TypeParam, *types.Union: // A stand-alone type parameter or union is not permitted as union // term. 
return nil, fmt.Errorf("invalid union term %T", t) @@ -199,7 +199,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) } } - case *TypeParam: + case *types.TypeParam: panic("unreachable") default: // For all other types, the term set is just a single non-tilde term diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index 7ed86e17..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. 
-func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. -func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) *types.Named { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index cf301af1..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) *types.Named { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. 
-func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go deleted file mode 100644 index 5e96e895..00000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typesinternal - -import "go/types" - -// This file contains back doors that allow gopls to avoid method sorting when -// using the objectpath package. -// -// This is performance-critical in certain repositories, but changing the -// behavior of the objectpath package is still being discussed in -// golang/go#61443. If we decide to remove the sorting in objectpath we can -// simply delete these back doors. Otherwise, we should add a new API to -// objectpath that allows controlling the sorting. - -// SkipEncoderMethodSorting marks enc (which must be an *objectpath.Encoder) as -// not requiring sorted methods. -var SkipEncoderMethodSorting func(enc interface{}) - -// ObjectpathObject is like objectpath.Object, but allows suppressing method -// sorting. -var ObjectpathObject func(pkg *types.Package, p string, skipMethodSorting bool) (types.Object, error) diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go new file mode 100644 index 00000000..bbabcd22 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/gover.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a fork of internal/gover for use by x/tools until +// go1.21 and earlier are no longer supported by x/tools. + +package versions + +import "strings" + +// A gover is a parsed Go version: major[.minor[.patch]][kind[pre]] +// The numbers are the original decimal strings to avoid integer overflows +// and since there is very little actual math. (Probably overflow doesn't matter in practice, +// but at the time this code was written, there was an existing test that used +// go1.99999999999, which does not fit in an int on 32-bit platforms. +// The "big decimal" representation avoids the problem entirely.) +type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. +func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). + // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) +func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 00000000..562eef21 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. + if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 00000000..a7b79207 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions always reports a file's Go version as the +// zero version at this Go version. +func FileVersions(info *types.Info, file *ast.File) string { return "" } + +// InitFileVersions is a noop at this Go version. +func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go new file mode 100644 index 00000000..7b9ba89a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions maps a file to the file's semantic Go version. +// The reported version is the zero version if a version cannot be determined. +func FileVersions(info *types.Info, file *ast.File) string { + return info.FileVersions[file] +} + +// InitFileVersions initializes info to record Go versions for Go files. +func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/vendor/golang.org/x/tools/internal/versions/versions_go121.go new file mode 100644 index 00000000..cf4a7d03 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions_go121.go @@ -0,0 +1,49 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/vendor/golang.org/x/tools/internal/versions/versions_go122.go new file mode 100644 index 00000000..c1c1814b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions_go122.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/version" +) + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { return version.Lang(x) } + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return version.Compare(x, y) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return version.IsValid(x) } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 57% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index a4411c22..b5e30cff 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. 
It invokes unexported methods on the @@ -57,87 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. 
- ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine whether the wrapper has finished shutting down, +// the caller should block on ccb.serializer.Done() without cc.mu held.
func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. 
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err @@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -364,6 +289,20 @@ type acBalancerWrapper struct { producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. 
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } // NewStream begins a streaming RPC on the addrConn. If the addrConn is not diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 429c389e..e6f2625b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( @@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), @@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). 
To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. 
for { s := cc.GetState() @@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) { type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. 
+ cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() - defer cc.mu.Unlock() if cc.conns == nil { - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - return nil + cc.mu.Unlock() + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle cc.addTraceEvent("entering idle mode") - go func() { - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() + cc.initIdleStateLocked() - return nil + cc.mu.Unlock() + + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -649,66 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. 
sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. 
- cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -804,11 +699,11 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } - internal.EnterIdleModeForTesting = func(cc *ClientConn) error { - return cc.enterIdleMode() + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() } internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.exitIdleMode() + return cc.idlenessMgr.ExitIdleMode() } } @@ -824,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -872,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -894,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -910,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. 
In both cases, we will clear the balancer attributes by @@ -932,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -947,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -968,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1174,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1216,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1253,40 +1140,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. 
- if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1307,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1345,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1849,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -2007,32 +1886,3 @@ func (cc *ClientConn) determineAuthority() error { channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b10618..08476ad1 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. 
+// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd2..5dafd34e 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index cfc9fd85..ba242618 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + 
internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -637,14 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, - idleTimeout: 30 * time.Minute, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -705,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. 
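The WithContextDialer note above describes the TCP keepalive workaround only in prose; a user-side sketch of such a dialer (unix-only, mirroring the NetDialerWithTCPKeepalive helper this patch adds to gRPC's internal package) might look like:

    package main

    import (
        "context"
        "net"
        "syscall"
        "time"

        "golang.org/x/sys/unix"
        "google.golang.org/grpc"
    )

    // osDefaultKeepaliveDialer returns a DialOption whose dialer enables TCP
    // keepalives with OS-default parameters instead of Go's 15s override.
    func osDefaultKeepaliveDialer() grpc.DialOption {
        d := &net.Dialer{
            // A negative KeepAlive keeps the Go stdlib from overriding the
            // OS defaults for TCP keepalive time and interval.
            KeepAlive: time.Duration(-1),
            // Enable SO_KEEPALIVE on the raw socket before it is connected.
            Control: func(_, _ string, c syscall.RawConn) error {
                return c.Control(func(fd uintptr) {
                    unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
                })
            },
        }
        return grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
            return d.DialContext(ctx, "tcp", addr)
        })
    }

    func main() {
        // Pass the option to grpc.Dial along with credentials etc.
        _ = osDefaultKeepaliveDialer()
    }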
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df..11f91668 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 5395e775..fc094f34 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -58,6 +59,12 @@ func TurnOn() { } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. 
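The reworked Unbounded contract, where Put can now fail and the Get channel closes only after the backlog drains, is easiest to see in a small sketch; buffer is internal to the grpc module, so this would only compile alongside the package's own tests:

    // Sketch; would live in a _test.go file inside the buffer package.
    package buffer

    import "fmt"

    func ExampleUnbounded_drainOnClose() {
        b := NewUnbounded()
        done := make(chan struct{})
        go func() {
            defer close(done)
            // The read channel closes only after Close() and a full drain,
            // so this loop observes every value that Put() accepted.
            for v := range b.Get() {
                b.Load() // request the next buffered value after each read
                fmt.Println(v)
            }
        }()
        if err := b.Put("work-item"); err != nil {
            fmt.Println(err) // Put now fails instead of silently dropping
        }
        b.Close()
        <-done

        // Output:
        // work-item
    }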
func IsOn() bool { return atomic.LoadInt32(&curState) == 1 diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddf..685a3cb4 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,9 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1..29f234ac 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". 
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 00000000..7f7044e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917db..f7f40a16 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - cs.closedMu.Lock() - defer cs.closedMu.Unlock() - - if cs.closed { - return false - } - cs.callbacks.Put(f) - return true + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. 
- case callback, ok := <-cs.callbacks.Get(): - if !ok { - return - } + case cb := <-cs.callbacks.Get(): cs.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing cs.done. - cs.closedMu.Lock() - cs.closed = true - backlog = cs.fetchPendingCallbacks() + // Close the buffer to prevent new callbacks from being added. cs.callbacks.Close() - cs.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} -func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-cs.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - cs.callbacks.Load() - default: - return backlog - } + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 6c272476..fe49cb74 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -26,8 +26,6 @@ import ( "sync" "sync/atomic" "time" - - "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { // and exit from idle mode. type Enforcer interface { ExitIdleMode() error - EnterIdleMode() error -} - -// Manager defines the functionality required to track RPC activity on a -// channel. -type Manager interface { - OnCallBegin() error - OnCallEnd() - Close() + EnterIdleMode() } -type noopManager struct{} - -func (noopManager) OnCallBegin() error { return nil } -func (noopManager) OnCallEnd() {} -func (noopManager) Close() {} - -// manager implements the Manager interface. It uses atomic operations to -// synchronize access to shared state and a mutex to guarantee mutual exclusion -// in a critical section. -type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. 
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } } -// NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return } - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m + m.timer = timeAfterFunc(d, m.handleIdleTimeout) } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { +func (m *Manager) resetIdleTimer(d time.Duration) { m.idleMu.Lock() defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. 
No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := m.enforcer.EnterIdleMode(); err != nil { - m.logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() m.actuallyIdle = true return true } +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + // OnCallBegin is invoked at the start of every RPC. -func (m *manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() error { if m.isClosed() { return nil } @@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error { // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.exitIdleMode(); err != nil { + if err := m.ExitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. atomic.AddInt32(&m.activeCallsCount, -1) @@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error { return nil } -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. 
-func (m *manager) exitIdleMode() error { +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() - if !m.actuallyIdle { - // This can happen in two scenarios: + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would get the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. // - // Either way, nothing to do here. + // In any case, there is nothing to do here. return nil } if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) + return fmt.Errorf("failed to exit idle mode: %w", err) } // Undo the idle entry process. This also respects any new RPC attempts. @@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error { m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + m.resetIdleTimerLocked(m.timeout) return nil } // OnCallEnd is invoked at the end of every RPC. -func (m *manager) OnCallEnd() { +func (m *Manager) OnCallEnd() { if m.isClosed() { return } @@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() { atomic.AddInt32(&m.activeCallsCount, -1) } -func (m *manager) isClosed() bool { +func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } -func (m *manager) Close() { +func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) m.idleMu.Lock() - m.timer.Stop() - m.timer = nil + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0d94c63e..2549fe8e 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -73,6 +73,11 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -177,10 +182,12 @@ var ( GRPCResolverSchemeExtraMetadata string = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) error + EnterIdleModeForTesting any // func(*grpc.ClientConn) // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
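A sketch of the slimmed-down Enforcer contract and the new NewManager signature (the toy enforcer is a stand-in for what ClientConn actually provides; idle is internal to the grpc module):

    // Sketch; idle is google.golang.org/grpc/internal/idle.
    package idle

    import (
        "log"
        "time"
    )

    // toyEnforcer stands in for the ClientConn side of the contract. Note that
    // EnterIdleMode no longer returns an error: entering idle cannot fail.
    type toyEnforcer struct{}

    func (toyEnforcer) ExitIdleMode() error { log.Println("exiting idle"); return nil }
    func (toyEnforcer) EnterIdleMode()      { log.Println("entering idle") }

    func sketch() {
        // Managers now start out idle, so the first RPC forces ExitIdleMode.
        m := NewManager(toyEnforcer{}, 10*time.Minute)
        defer m.Close()

        if err := m.OnCallBegin(); err != nil {
            log.Printf("RPC blocked: %v", err) // the enforcer failed to exit idle
            return
        }
        defer m.OnCallEnd()
    }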
ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b3..b66dcb21 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. 
- minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -178,7 +151,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. 
- return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 00000000..c7fc557d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overidden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. 
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go new file mode 100644 index 00000000..aeffd3e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go @@ -0,0 +1,29 @@ +//go:build !unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 00000000..078137b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 17f7a21b..a9d70e2a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -347,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
- - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -370,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + s := &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d6f5c493..59f67655 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -493,8 +494,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer {
return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 6fa1eb41..680c9eba 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
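The LocalAddr field added to peer.Peer is plumbed through both client and server transports and surfaces to applications via the existing peer.FromContext helper; a small sketch (the handler shape is hypothetical):

```go
package peerexample

import (
	"context"
	"log"

	"google.golang.org/grpc/peer"
)

// logPeer can run inside any server handler or interceptor: Addr is the
// remote end as before, and LocalAddr (new in this release) is the local
// end of the same connection.
func logPeer(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("RPC from %v on local address %v", p.Addr, p.LocalAddr)
	}
}
```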
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -334,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -342,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. @@ -592,18 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -629,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
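These hunks begin lifting the stats-handler callouts (TagConn/HandleConn, TagRPC/InHeader) out of the HTTP/2 transport; the server.go hunks later in this patch re-emit them from the grpc server layer. The observable events are unchanged, as the sketch below illustrates using only the public stats API (the logging behavior is illustrative):

```go
package statsexample

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// connLogger observes the events this refactor relocates: TagConn, ConnBegin
// and ConnEnd now fire from the server's serveStreams, and InHeader from
// handleStream.
type connLogger struct{}

func (connLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (connLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if h, ok := s.(*stats.InHeader); ok {
		log.Printf("%s: %d header bytes on the wire", h.FullMethod, h.WireLength)
	}
}

func (connLogger) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	log.Printf("conn %v -> %v", info.RemoteAddr, info.LocalAddr)
	return ctx
}

func (connLogger) HandleConn(ctx context.Context, s stats.ConnStats) {
	switch s.(type) {
	case *stats.ConnBegin:
		log.Print("connection established")
	case *stats.ConnEnd:
		log.Print("connection closed")
	}
}

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(connLogger{}))
}
```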
-func (t *http2Server) HandleStreams(handle func(*Stream)) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -664,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -1242,10 +1223,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1311,10 +1288,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1397,11 +1370,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1433,10 +1406,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1461,6 +1436,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 41596198..24fa1032 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. 
-func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aac056e7..b7b8fec1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -425,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -437,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -698,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream)) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. 
Drain(debugData string) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf1..49446825 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -153,14 +153,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +205,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,17 +222,16 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insensitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219f..a821ff9b 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 236837f4..bf56faa7 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -37,7 +37,6 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool - idle bool blockingCh chan struct{} picker balancer.Picker statsHandlers []stats.Handler // to record blocking picker calls @@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
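The strings.EqualFold change makes the fallback path of ValueFromIncomingContext case-insensitive, so callers no longer need to pre-lowercase keys that arrived through contexts built outside the metadata helpers. A short usage sketch (the helper name is hypothetical):

```go
package mdexample

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// firstValue returns one header value from incoming metadata, or "" if the
// key is absent.
func firstValue(ctx context.Context, key string) string {
	if vals := metadata.ValueFromIncomingContext(ctx, key); len(vals) > 0 {
		return vals[0]
	}
	return ""
}
```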
func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 2e9cf66b..5128f936 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" @@ -65,19 +64,6 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - if !envconfig.PickFirstLBConfig { - // Prior to supporting loadbalancing configuration, the pick_first LB - // policy did not implement the balancer.ConfigParser interface. This - // meant that if a non-empty configuration was passed to it, the service - // config unmarshaling code would throw a warning log, but would - // continue using the pick_first LB policy. The code below ensures the - // same behavior is retained if the env var is not set. - if string(js) != "{}" { - logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) - } - return nil, nil - } - var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 00000000..14aa6f20 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. 
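With the envconfig gate deleted, pick_first unconditionally parses its load-balancing config. A hedged sketch of opting into shuffleAddressList from a client, assuming an insecure local target; the JSON keys mirror pfConfig's struct tags:

```go
package pickfirstexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialWithShuffle requests address shuffling through the default service
// config rather than an environment variable.
func dialWithShuffle(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(
			`{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`),
	)
}
```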
+package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887..ada5b9bb 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. 
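As the deprecation notice says, callers should fetch the builder from the global registry instead of importing this shim, since grpc registers the DNS resolver by default; a one-line sketch:

```go
package dnsexample

import "google.golang.org/grpc/resolver"

// dnsBuilder replaces a direct call to dns.NewBuilder.
func dnsBuilder() resolver.Builder {
	return resolver.Get("dns")
}
```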
+func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e22..bd1c7d01 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -240,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d6833056..00000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. 
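A usage sketch for the new EndpointMap, relying only on what the added code defines (the addresses are made up). The point is that keys are the unordered set of address strings, so two endpoints listing the same addresses in a different order resolve to the same entry:

```go
package epmapexample

import "google.golang.org/grpc/resolver"

// lookup stores a value under one ordering of the addresses and retrieves it
// under another.
func lookup() (any, bool) {
	em := resolver.NewEndpointMap()
	a := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.1:80"}, {Addr: "10.0.0.2:80"}}}
	b := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.2:80"}, {Addr: "10.0.0.1:80"}}}
	em.Set(a, "backend-1")
	return em.Get(b) // yields "backend-1", true despite the reordering
}
```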
- serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. 
-func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. 
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 00000000..c79bab12 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
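The rewritten wrapper leans on the internal grpcsync.CallbackSerializer to make all resolver-to-gRPC callbacks mutually exclusive. That type is not exported, so the following is only a pattern sketch, under the assumption that a buffered queue drained by a single goroutine captures the essence:

```go
package serializerexample

import "context"

// serializer owns a queue drained by one goroutine; callbacks therefore run
// one at a time, which is what lets fields touched only inside scheduled
// callbacks go without extra locking.
type serializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{
		callbacks: make(chan func(context.Context), 64),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx)
			case <-ctx.Done():
				return // callbacks still queued are dropped after cancellation
			}
		}
	}()
	return s
}

// Schedule enqueues f; it reports false once the serializer has shut down.
func (s *serializer) Schedule(f func(context.Context)) bool {
	select {
	case s.callbacks <- f:
		return true
	case <-s.done:
		return false
	}
}
```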
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8f60d421..682fa183 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,6 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } @@ -81,6 +85,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -139,7 +144,8 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -578,11 +584,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -616,15 +624,14 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -806,6 +813,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. 
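The next hunk adds a long note to Serve's doc comment about Go overriding OS TCP keepalive defaults. A sketch of the recipe that note describes, with hypothetical type and function names: a negative ListenConfig.KeepAlive stops Go from forcing its 15s defaults, and an Accept override re-enables SO_KEEPALIVE so the OS values apply.

```go
package keepaliveexample

import (
	"context"
	"net"
)

type osKeepaliveListener struct {
	net.Listener
}

func (l osKeepaliveListener) Accept() (net.Conn, error) {
	conn, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := conn.(*net.TCPConn); ok {
		// SetKeepAlive toggles SO_KEEPALIVE only; probe time and interval
		// stay at the OS defaults because ListenConfig did not set them.
		if err := tc.SetKeepAlive(true); err != nil {
			conn.Close()
			return nil, err
		}
	}
	return conn, nil
}

func listenWithOSKeepalive(addr string) (net.Listener, error) {
	lc := net.ListenConfig{KeepAlive: -1} // negative: don't override OS defaults
	inner, err := lc.Listen(context.Background(), "tcp", addr)
	if err != nil {
		return nil, err
	}
	return osKeepaliveListener{inner}, nil
}
```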
+// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -917,7 +936,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } @@ -971,18 +990,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() s.handleStream(st, stream) } @@ -996,7 +1026,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1040,7 +1069,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1689,6 +1718,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ctx := stream.Context() + ctx = contextWithServer(ctx, s) var ti *traceInfo if EnableTracing { tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) @@ -1697,7 +1727,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str tr: tr, firstLine: firstLine{ client: false, - remoteAddr: t.RemoteAddr(), + remoteAddr: t.Peer().Addr, }, } if dl, ok := ctx.Deadline(); ok { @@ -1731,6 +1761,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx 
= sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { @@ -1820,62 +1866,68 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1884,22 +1936,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. 
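Stop and GracefulStop now share a single stop(graceful bool) implementation. The usual application-side shutdown idiom is unaffected; a sketch with an arbitrary 10-second deadline:

```go
package shutdownexample

import (
	"time"

	"google.golang.org/grpc"
)

// shutdown drains gracefully first and falls back to a hard Stop if the
// drain does not finish in time.
func shutdown(s *grpc.Server) {
	done := make(chan struct{})
	go func() {
		s.GracefulStop() // blocks until in-flight RPCs finish
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		s.Stop() // force-closes remaining connections
	}
}
```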
+func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1913,11 +1957,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 6d2cadd7..dc2cea59 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.59.0" +const Version = "1.60.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bb480f1f..896dc38f 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,16 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. 
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' @@ -94,15 +97,14 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -110,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ @@ -119,94 +120,73 @@ for MOD_FILE in $(find . -name 'go.mod'); do done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. 
+grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. 
- if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX TODO: Remove the below deprecation usages: +CloseNotifier +Roots.Subjects +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d..f4790237 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. 
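Note on the protojson hunks that follow: UnmarshalOptions gains a RecursionLimit (zero selects protowire.DefaultRecursionLimit), and DiscardUnknown now also skips unknown enum name values. A minimal usage sketch, using the well-known Duration message for concreteness:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	opts := protojson.UnmarshalOptions{
		DiscardUnknown: true, // now also drops unknown enum *names*
		RecursionLimit: 100,  // 0 means protowire.DefaultRecursionLimit
	}
	d := new(durationpb.Duration)
	if err := opts.Unmarshal([]byte(`"3s"`), d); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(d.AsDuration()) // 3s
}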
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
+	d.opts.RecursionLimit--
+	if d.opts.RecursionLimit < 0 {
+		return errors.New("exceeded max recursion depth")
+	}
 	if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
 		return unmarshal(d, m)
 	}
@@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field
 	if err != nil {
 		return err
 	}
-	m.Set(fd, val)
+	if val.IsValid() {
+		m.Set(fd, val)
+	}
 	return nil
 }
 
@@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
 	}
 
 	case protoreflect.EnumKind:
-		if v, ok := unmarshalEnum(tok, fd); ok {
+		if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
 			return v, nil
 		}
 
@@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
 	return protoreflect.ValueOfBytes(b), true
 }
 
-func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
 	switch tok.Kind() {
 	case json.String:
 		// Lookup EnumNumber based on name.
@@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec
 		if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
 			return protoreflect.ValueOfEnum(enumVal.Number()), true
 		}
+		if discardUnknown {
+			return protoreflect.Value{}, true
+		}
 
 	case json.Number:
 		if n, ok := tok.Int(32); ok {
@@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc
 			if err != nil {
 				return err
 			}
-			list.Append(val)
+			if val.IsValid() {
+				list.Append(val)
+			}
 		}
 	}
 
@@ -609,8 +628,9 @@ Loop:
 			if err != nil {
 				return err
 			}
-
-			mmap.Set(pkey, pval)
+			if pval.IsValid() {
+				mmap.Set(pkey, pval)
+			}
 		}
 
 	return nil
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
index 21d5d2cb..ae71007c 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -6,6 +6,6 @@
 // format. It follows the guide at
 // https://protobuf.dev/programming-guides/proto3#json.
 //
-// This package produces a different output than the standard "encoding/json"
+// This package produces a different output than the standard [encoding/json]
 // package, which does not operate correctly on protocol buffer messages.
 package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 66b95870..3f75098b 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -31,7 +31,7 @@ func Format(m proto.Message) string {
 	return MarshalOptions{Multiline: true}.Format(m)
 }
 
-// Marshal writes the given proto.Message in JSON format using default options.
+// Marshal writes the given [proto.Message] in JSON format using default options.
 // Do not depend on the output being stable. It may change over time across
 // different versions of the program.
 func Marshal(m proto.Message) ([]byte, error) {
@@ -81,6 +81,25 @@ type MarshalOptions struct {
 	//  ╚═══════╧════════════════════════════╝
 	EmitUnpopulated bool
 
+	// EmitDefaultValues specifies whether to emit default-valued primitive fields,
+	// empty lists, and empty maps. The fields affected are as follows:
+	//  ╔═══════╤════════════════════════════════════════╗
+	//  ║ JSON  │ Protobuf field                         ║
+	//  ╠═══════╪════════════════════════════════════════╣
+	//  ║ false │ non-optional scalar boolean fields     ║
+	//  ║ 0     │ non-optional scalar numeric fields     ║
+	//  ║ ""    │ non-optional scalar string/byte fields ║
+	//  ║ []    │ empty repeated fields                  ║
+	//  ║ {}    │ empty map fields                       ║
+	//  ╚═══════╧════════════════════════════════════════╝
+	//
+	// Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
+	// i.e. presence-sensing fields that are omitted will remain omitted to preserve
+	// presence-sensing.
+	// EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
+	// a strict superset of the latter.
+	EmitDefaultValues bool
+
 	// Resolver is used for looking up types when expanding google.protobuf.Any
 	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
 	Resolver interface {
@@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
 	return string(b)
 }
 
-// Marshal marshals the given proto.Message in the JSON format using options in
+// Marshal marshals the given [proto.Message] in the JSON format using options in
 // MarshalOptions. Do not depend on the output being stable. It may change over
 // time across different versions of the program.
 func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
@@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl
 
 // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
 // method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct{ protoreflect.Message }
+type unpopulatedFieldRanger struct {
+	protoreflect.Message
+
+	skipNull bool
+}
 
 func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
 	fds := m.Descriptor().Fields()
@@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
 		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
 		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
 		if isProto2Scalar || isSingularMessage {
+			if m.skipNull {
+				continue
+			}
 			v = protoreflect.Value{} // use invalid value to emit null
 		}
 		if !f(fd, v) {
@@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
 	defer e.EndObject()
 
 	var fields order.FieldRanger = m
-	if e.opts.EmitUnpopulated {
-		fields = unpopulatedFieldRanger{m}
+	switch {
+	case e.opts.EmitUnpopulated:
+		fields = unpopulatedFieldRanger{Message: m, skipNull: false}
+	case e.opts.EmitDefaultValues:
+		fields = unpopulatedFieldRanger{Message: m, skipNull: true}
 	}
 	if typeURL != "" {
 		fields = typeURLFieldRanger{fields, typeURL}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 6c37d417..4b177c82 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error {
 	// Use another decoder to parse the unread bytes for @type field.
This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,29 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4..a45f112b 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. 
This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41..95967e81 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -33,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686c..e942bc98 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). 
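Note: the [ParseError] links above all refer to the same convention, namely that every protowire.Consume* function reports a negative length on failure, and protowire.ParseError turns that length into an error. A short sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := protowire.AppendVarint(nil, 300)
	v, n := protowire.ConsumeVarint(b)
	if n < 0 {
		// Negative length signals an error; ParseError converts it.
		fmt.Println(protowire.ParseError(n))
		return
	}
	fmt.Println(v, n) // 300 2
}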
func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. 
func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1..a45625c8 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
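Note on the stringer.go rewrite below: the per-type string table is replaced with methodAndName pairs, so each accessor is resolved through reflect once and then invoked with no arguments. The pattern in isolation (desc here is a stand-in type, not the real descriptor interface):

package main

import (
	"fmt"
	"reflect"
)

type desc struct{ name string }

func (d desc) Name() string { return d.name }
func (d desc) Kind() string { return "message" }

type methodAndName struct {
	method reflect.Value
	name   string
}

func main() {
	rv := reflect.ValueOf(desc{name: "Foo"})
	accessors := []methodAndName{
		{rv.MethodByName("Name"), "Name"},
		{rv.MethodByName("Kind"), "Kind"},
	}
	for _, a := range accessors {
		// Each accessor is a niladic method; Call(nil) invokes it.
		out := a.method.Call(nil)[0]
		fmt.Printf("%s: %v\n", a.name, out)
	}
}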
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. + record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 00000000..14656b65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
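Note on the new editiondefaults package that follows: it relies on Go's embed support, a blank import of "embed" plus a //go:embed directive directly above a []byte variable. The same pattern in isolation (defaults.binpb is a hypothetical sibling file that must exist at build time):

package embedexample

import _ "embed"

// Defaults holds the raw bytes of the embedded file; the directive
// must sit immediately above the variable declaration.
//
//go:embed defaults.binpb
var Defaults []byte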
+ +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 00000000..18f07568 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb @@ -0,0 +1,4 @@ + +  (0æ +  (0ç +  (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6eb..d2b3ac03 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689ba..8826bcf4 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -21,11 +21,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
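Note on the EditionFeatures struct added in the next hunk: it flattens the FeatureSet message into plain booleans, and a single field_presence value feeds two of them. A sketch, assuming the descriptor.proto enum values EXPLICIT = 1 and LEGACY_REQUIRED = 3:

package main

import "fmt"

const (
	fieldPresenceExplicit       = 1 // FeatureSet.EXPLICIT (assumed value)
	fieldPresenceLegacyRequired = 3 // FeatureSet.LEGACY_REQUIRED (assumed value)
)

func main() {
	v := fieldPresenceLegacyRequired
	// LEGACY_REQUIRED implies explicit presence, so both flags flip on.
	isFieldPresence := v == fieldPresenceExplicit || v == fieldPresenceLegacyRequired
	isLegacyRequired := v == fieldPresenceLegacyRequired
	fmt.Println(isFieldPresence, isLegacyRequired) // true true
}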
@@ -44,6 +59,7 @@ type (
 	}
 	FileL1 struct {
 		Syntax  protoreflect.Syntax
+		Edition Edition // Only used if Syntax == Editions
 		Path    string
 		Package protoreflect.FullName
 
@@ -51,12 +67,41 @@ type (
 		Messages   Messages
 		Extensions Extensions
 		Services   Services
+
+		EditionFeatures EditionFeatures
 	}
 	FileL2 struct {
 		Options   func() protoreflect.ProtoMessage
 		Imports   FileImports
 		Locations SourceLocations
 	}
+
+	EditionFeatures struct {
+		// IsFieldPresence is true if field_presence is EXPLICIT
+		// https://protobuf.dev/editions/features/#field_presence
+		IsFieldPresence bool
+		// IsLegacyRequired is true if field_presence is LEGACY_REQUIRED
+		// https://protobuf.dev/editions/features/#field_presence
+		IsLegacyRequired bool
+		// IsOpenEnum is true if enum_type is OPEN
+		// https://protobuf.dev/editions/features/#enum_type
+		IsOpenEnum bool
+		// IsPacked is true if repeated_field_encoding is PACKED
+		// https://protobuf.dev/editions/features/#repeated_field_encoding
+		IsPacked bool
+		// IsUTF8Validated is true if utf8_validation is VERIFY
+		// https://protobuf.dev/editions/features/#utf8_validation
+		IsUTF8Validated bool
+		// IsDelimitedEncoded is true if message_encoding is DELIMITED
+		// https://protobuf.dev/editions/features/#message_encoding
+		IsDelimitedEncoded bool
+		// IsJSONCompliant is true if json_format is ALLOW
+		// https://protobuf.dev/editions/features/#json_format
+		IsJSONCompliant bool
+		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
+		// UnmarshalJSON([]byte) error method for enums.
+		GenerateLegacyUnmarshalJSON bool
+	}
 )
 
 func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
@@ -117,6 +162,8 @@ type (
 	}
 	EnumL1 struct {
 		eagerValues bool // controls whether EnumL2.Values is already populated
+
+		EditionFeatures EditionFeatures
 	}
 	EnumL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -178,6 +225,8 @@ type (
 		Extensions   Extensions
 		IsMapEntry   bool // promoted from google.protobuf.MessageOptions
 		IsMessageSet bool // promoted from google.protobuf.MessageOptions
+
+		EditionFeatures EditionFeatures
 	}
 	MessageL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -210,6 +259,8 @@ type (
 		ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum            protoreflect.EnumDescriptor
 		Message         protoreflect.MessageDescriptor
+
+		EditionFeatures EditionFeatures
 	}
 
 	Oneof struct {
@@ -219,6 +270,8 @@ type (
 	OneofL1 struct {
 		Options func() protoreflect.ProtoMessage
 		Fields  OneofFields // must be consistent with Message.Fields.ContainingOneof
+
+		EditionFeatures EditionFeatures
 	}
 )
 
@@ -268,23 +321,36 @@ func (fd *Field) Options() protoreflect.ProtoMessage {
 }
 func (fd *Field) Number() protoreflect.FieldNumber      { return fd.L1.Number }
 func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality }
-func (fd *Field) Kind() protoreflect.Kind               { return fd.L1.Kind }
-func (fd *Field) HasJSONName() bool                     { return fd.L1.StringName.hasJSON }
-func (fd *Field) JSONName() string                      { return fd.L1.StringName.getJSON(fd) }
-func (fd *Field) TextName() string                      { return fd.L1.StringName.getText(fd) }
+func (fd *Field) Kind() protoreflect.Kind {
+	return fd.L1.Kind
+}
+func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
+func (fd *Field) JSONName() string  { return fd.L1.StringName.getJSON(fd) }
+func (fd *Field) TextName() string  { return fd.L1.StringName.getText(fd) }
 func (fd *Field) HasPresence() bool {
-	return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 ||
fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + if fd.L1.Cardinality == protoreflect.Repeated { + return false + } + explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence + return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsPacked + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { + // proto3 repeated fields are packed by default. + return !fd.L1.HasPacked || fd.L1.IsPacked } return fd.L1.IsPacked } @@ -333,6 +399,9 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. func (fd *Field) EnforceUTF8() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsUTF8Validated + } if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } @@ -359,10 +428,11 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9..237e64fd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,6 +115,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +124,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField 
!= genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +160,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -166,6 +179,15 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 } + if fd.L1.Syntax == protoreflect.Editions { + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + } + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + // Must allocate all declarations before parsing each descriptor type // to ensure we handled all descriptors in "flattened ordering". if numEnums > 0 { @@ -219,6 +241,28 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd @@ -275,6 +319,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +425,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a7..482a61cc 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -414,6 +414,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +466,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ 
-497,6 +504,13 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.HasEnforceUTF8 = true fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -534,6 +548,7 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte + xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -565,6 +580,12 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } + if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } + if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { + xd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -589,6 +610,13 @@ func (xd *Extension) unmarshalOptions(b []byte) { case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 00000000..0375a49d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
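Note on editions.go below: it decodes FeatureSet and FeatureSetDefaults by hand with protowire rather than through generated code, presumably to keep this low-level package free of generated-code dependencies. Its consume-tag/consume-value loop looks like this in isolation:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Build a tiny message by hand: field 1 varint 3, field 2 bytes "hi".
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 3)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("hi"))

	// Walk it the same way unmarshalFeatureSet walks FeatureSet options.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			fmt.Println("field", num, "varint", v)
		case protowire.BytesType:
			v, m := protowire.ConsumeBytes(b)
			b = b[m:]
			fmt.Println("field", num, "bytes", string(v))
		default:
			m := protowire.ConsumeFieldValue(num, typ, b)
			if m < 0 {
				panic(protowire.ParseError(m))
			}
			b = b[m:]
		}
	}
}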
+
+package filedesc
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/editiondefaults"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var defaultsCache = make(map[Edition]EditionFeatures)
+
+func init() {
+	unmarshalEditionDefaults(editiondefaults.Defaults)
+}
+
+func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
+	for len(b) > 0 {
+		num, _, n := protowire.ConsumeTag(b)
+		b = b[n:]
+		switch num {
+		case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+		default:
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
+		}
+	}
+	return parent
+}
+
+func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
+	for len(b) > 0 {
+		num, typ, n := protowire.ConsumeTag(b)
+		b = b[n:]
+		switch typ {
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.FeatureSet_FieldPresence_field_number:
+				parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value
+				parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value
+			case genid.FeatureSet_EnumType_field_number:
+				parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value
+			case genid.FeatureSet_RepeatedFieldEncoding_field_number:
+				parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value
+			case genid.FeatureSet_Utf8Validation_field_number:
+				parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value
+			case genid.FeatureSet_MessageEncoding_field_number:
+				parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
+			case genid.FeatureSet_JsonFormat_field_number:
+				parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+			default:
+				panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
+			}
+		case protowire.BytesType:
+			v, m := protowire.ConsumeBytes(b)
+			b = b[m:]
+			switch num {
+			case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+				parent = unmarshalGoFeature(v, parent)
+			}
+		}
+	}
+
+	return parent
+}
+
+func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures {
+	var parentFS EditionFeatures
+	switch p := parentDesc.(type) {
+	case *File:
+		parentFS = p.L1.EditionFeatures
+	case *Message:
+		parentFS = p.L1.EditionFeatures
+	default:
+		panic(fmt.Sprintf("unknown parent type %T", parentDesc))
+	}
+	return parentFS
+}
+
+func unmarshalEditionDefault(b []byte) {
+	var ed Edition
+	var fs EditionFeatures
+	for len(b) > 0 {
+		num, typ, n := protowire.ConsumeTag(b)
+		b = b[n:]
+		switch typ {
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number:
+				ed = Edition(v)
+			}
+		case protowire.BytesType:
+			v, m := protowire.ConsumeBytes(b)
+			b = b[m:]
+			switch num {
+			case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
+				fs = unmarshalFeatureSet(v, fs)
+			}
+		}
+	}
+	defaultsCache[ed] = fs
+}
+
+func unmarshalEditionDefaults(b []byte) {
+	for len(b) > 0 {
+		num, _, n := protowire.ConsumeTag(b)
+		b = b[n:]
+		switch num {
+		case genid.FeatureSetDefaults_Defaults_field_number:
+			def, m := protowire.ConsumeBytes(b)
+			b = b[m:]
+			unmarshalEditionDefault(def)
+		case genid.FeatureSetDefaults_MinimumEdition_field_number,
genid.FeatureSetDefaults_MaximumEdition_field_number:
+			// We don't care about the minimum and maximum editions. If the
+			// edition we are looking for later on is not in the cache we know
+			// it is outside of the range between minimum and maximum edition.
+			_, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+		default:
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num))
+		}
+	}
+}
+
+func getFeaturesFor(ed Edition) EditionFeatures {
+	if def, ok := defaultsCache[ed]; ok {
+		return def
+	}
+	panic(fmt.Sprintf("unsupported edition: %v", ed))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 136f1b21..40272c89 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -12,6 +12,27 @@ import (
 
 const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
 
+// Full and short names for google.protobuf.Edition.
+const (
+	Edition_enum_fullname = "google.protobuf.Edition"
+	Edition_enum_name     = "Edition"
+)
+
+// Enum values for google.protobuf.Edition.
+const (
+	Edition_EDITION_UNKNOWN_enum_value         = 0
+	Edition_EDITION_PROTO2_enum_value          = 998
+	Edition_EDITION_PROTO3_enum_value          = 999
+	Edition_EDITION_2023_enum_value            = 1000
+	Edition_EDITION_2024_enum_value            = 1001
+	Edition_EDITION_1_TEST_ONLY_enum_value     = 1
+	Edition_EDITION_2_TEST_ONLY_enum_value     = 2
+	Edition_EDITION_99997_TEST_ONLY_enum_value = 99997
+	Edition_EDITION_99998_TEST_ONLY_enum_value = 99998
+	Edition_EDITION_99999_TEST_ONLY_enum_value = 99999
+	Edition_EDITION_MAX_enum_value             = 2147483647
+)
+
 // Names for google.protobuf.FileDescriptorSet.
 const (
 	FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@@ -81,7 +102,7 @@ const (
 	FileDescriptorProto_Options_field_number        protoreflect.FieldNumber = 8
 	FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
 	FileDescriptorProto_Syntax_field_number         protoreflect.FieldNumber = 12
-	FileDescriptorProto_Edition_field_number        protoreflect.FieldNumber = 13
+	FileDescriptorProto_Edition_field_number        protoreflect.FieldNumber = 14
 )
 
 // Names for google.protobuf.DescriptorProto.
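Note on the Edition numbering above: the internal filedesc enum and the generated google.protobuf.Edition enum share values, so they convert directly, with proto2 and proto3 modeled as pseudo-editions 998 and 999. A sketch with a hypothetical local copy of the constants:

package main

import "fmt"

// Edition mirrors the internal filedesc.Edition values shown in the
// patch; this local copy exists only for illustration.
type Edition int32

const (
	EditionUnknown Edition = 0
	EditionProto2  Edition = 998
	EditionProto3  Edition = 999
	Edition2023    Edition = 1000
)

func main() {
	// Prints the raw numeric values: 998 999 1000
	fmt.Println(EditionProto2, EditionProto3, Edition2023)
}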
@@ -184,10 +205,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +218,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -204,6 +228,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -212,29 +242,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -291,12 +318,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -468,7 +524,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -478,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -490,7 +546,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -500,6 +555,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -515,7 +571,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -525,6 +580,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 )
@@ -534,6 +590,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -547,6 +610,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +618,7 @@ const ( MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +629,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +650,9 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +664,9 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
) @@ -613,8 +681,9 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -624,24 +693,80 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. 
const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +775,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +799,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +814,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +827,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +853,18 @@ const ( // Field names for google.protobuf.ServiceOptions. 
const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +879,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +892,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -759,6 +902,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -816,6 +966,163 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
+const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. 
const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -917,3 +1224,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 00000000..fd9015e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b..ad6f80c4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea..49bc73e2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. 
+const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc..2b8f122c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. 
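The comment above carries the reasoning for this hunk: the deleted legacyLoadExtensionFieldInfo memoized per-extension-type metadata in a package-level sync.Map that never evicts (and, as the removed lines show, it looked entries up in legacyExtensionFieldInfoCache but stored them into legacyMessageTypeCache), so dynamically constructed extension types could never be garbage collected; the replacement, on the next line, simply recomputes the metadata per call. A minimal sketch of that leak-prone memoization pattern, with hypothetical key and info types standing in for ExtensionType and *extensionFieldInfo:

package main

import (
	"fmt"
	"sync"
)

// key and info are hypothetical stand-ins for an extension type and its
// derived metadata.
type key struct{ name string }
type info struct{ size int }

// cache entries are never deleted, so every key stored here stays
// reachable for the life of the process.
var cache sync.Map // map[*key]*info

func cachedInfo(k *key) *info {
	if v, ok := cache.Load(k); ok {
		return v.(*info)
	}
	i := &info{size: len(k.name)} // stands in for makeExtensionFieldInfo
	if v, loaded := cache.LoadOrStore(k, i); loaded {
		return v.(*info)
	}
	return i
}

func main() {
	// Each short-lived key is pinned by the map even after the caller
	// drops it; recomputing per call, as the change does, trades a
	// little CPU for bounded memory until weak references exist.
	for i := 0; i < 3; i++ {
		_ = cachedInfo(&key{name: fmt.Sprint("ext", i)})
	}
}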
+ return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63..f55dc01e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { 
return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 {
@@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3a..13077751 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fa..2ab2c629 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a..629bacdc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields.
oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60..986322b1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf..517e9443 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573..4b020e31 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e765..a6e7df24 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d34..a008acd0 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 00000000..60166f2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d..a50fcfb4 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. 
const ( Major = 1 - Minor = 31 + Minor = 33 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946..e5b03b56 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717..80ed16a0 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. 
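Looping back to the consume*Slice hunks in codec_gen.go above: they all rely on the fact that in a packed varint field each element ends at the one byte whose continuation bit is clear, so counting bytes below 0x80 yields the element count before any decoding happens, and the destination slice can be grown exactly once. A standalone sketch of that pre-counting idea against the real protowire API, where countVarints is a hypothetical helper mirroring the added loops:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// countVarints reports how many varints are packed in b: one per byte
// with the continuation bit (0x80) clear.
func countVarints(b []byte) int {
	n := 0
	for _, v := range b {
		if v < 0x80 {
			n++
		}
	}
	return n
}

func main() {
	// Pack three varints, as a packed repeated int32 field would be.
	var b []byte
	for _, v := range []int32{1, 150, 3} {
		b = protowire.AppendVarint(b, uint64(v))
	}

	// Size the destination once, then decode without re-allocating.
	s := make([]int32, 0, countVarints(b))
	for len(b) > 0 {
		v, n := protowire.ConsumeVarint(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		s = append(s, int32(v))
		b = b[n:]
	}
	fmt.Println(s) // [1 150 3]
}

For fixed-width fields the hunks take the even cheaper route of dividing len(b) by protowire.SizeFixed32() or protowire.SizeFixed64().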
@@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. +// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d..4fed202f 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda..17899a3a 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. 
func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab33..3c6fe578 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b..7543ee6b 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb120..baa0cc62 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. 
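The reworked Error documentation recommends errors.Is for classifying failures. A small sketch; the single 0xff byte is just an arbitrary invalid tag used to force a wire-format error.

    package main

    import (
        "errors"
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/emptypb"
    )

    func main() {
        // 0xff is not a valid field tag, so Unmarshal must fail.
        err := proto.Unmarshal([]byte{0xff}, &emptypb.Empty{})
        fmt.Println(errors.Is(err, proto.Error)) // true: the error came from this module
    }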
// The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. 
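NewFile and NewFiles, referenced above, are the usual entry points for turning descriptor protos into resolved descriptors. A sketch under the assumption that the file needs nothing beyond the global registry to resolve; example.proto and Greeting are hypothetical names.

    package main

    import (
        "fmt"
        "log"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/reflect/protodesc"
        "google.golang.org/protobuf/reflect/protoregistry"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        fdp := &descriptorpb.FileDescriptorProto{
            Name:    proto.String("example.proto"), // hypothetical file name
            Package: proto.String("example"),
            Syntax:  proto.String("proto3"),
            MessageType: []*descriptorpb.DescriptorProto{
                {Name: proto.String("Greeting")},
            },
        }
        fd, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(fd.Messages().Get(0).FullName()) // example.Greeting
    }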
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1a..b3278163 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,9 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + } if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +118,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -137,6 +162,34 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. + if canBePacked(fd) { + f.L1.HasPacked = true + f.L1.IsPacked = f.L1.EditionFeatures.IsPacked + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. + // When using editions we either parse the option or resolve the + // appropriate default here (instead of later when this option is + // requested from the descriptor). + // In proto2/proto3 syntax HasEnforceUTF8 might be false. 
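The canBePacked helper added in this hunk encodes the packed-encoding rule: only repeated scalar numeric fields may be packed, never strings, bytes, messages, or groups. The outcome is visible through protoreflect on any compiled descriptor, as in this sketch built on two fields of descriptor.proto itself:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        // repeated int32 path = 1 [packed = true]; — a packable scalar field.
        loc := (&descriptorpb.SourceCodeInfo_Location{}).ProtoReflect().Descriptor()
        fmt.Println(loc.Fields().ByName("path").IsPacked()) // true

        // repeated int32 public_dependency = 10; — proto2 defaults to unpacked.
        fdp := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
        fmt.Println(fdp.Fields().ByName("public_dependency").IsPacked()) // false
    }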
+ f.L1.HasEnforceUTF8 = true + f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind + } + } } return fs, nil } @@ -151,6 +204,9 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } + if parent.Syntax() == protoreflect.Editions { + o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) + } } } return os, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e350..254ca585 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -276,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d564..e4dcaf87 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -107,7 +107,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } if m.Syntax() == protoreflect.Proto3 { @@ -314,8 +314,8 @@ func checkValidGroup(fd protoreflect.FieldDescriptor) error { switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case fd.Syntax() == protoreflect.Proto3: + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") case fd.FullName().Parent() != md.FullName().Parent(): diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 00000000..2a6b29d1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,148 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" +) + +const ( + SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 + SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 +) + +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(editiondefaults.Defaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fs := defaults.GetDefaults()[0].GetFeatures() + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. + for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fs = def.GetFeatures() + } else { + break + } + } + defaultsCache[ed] = fs + return fs +} + +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN + } + + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED + } + + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY + } + + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED + } + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. +func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceff..9d6e0542 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. 
func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa1492..00b01fbd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. 
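The To*DescriptorProto conversions above are the inverse direction of protodesc.NewFile. A quick round-trip sketch; the Duration well-known type is used only because its file is always linked in:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protodesc"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()
        fdp := protodesc.ToFileDescriptorProto(fd)
        fmt.Println(fdp.GetName())   // google/protobuf/duration.proto
        fmt.Println(fdp.GetSyntax()) // proto3
    }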
// Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. // // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┐ // │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance.
-// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. // -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. +// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement.
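The rewritten bullets point at the two ways to obtain a usable MessageType from a bare descriptor: a registry lookup or a dynamic type. A sketch of both, assuming the Duration well-known type merely as a registered stand-in:

    package main

    import (
        "fmt"
        "log"

        "google.golang.org/protobuf/reflect/protoregistry"
        "google.golang.org/protobuf/types/dynamicpb"
        _ "google.golang.org/protobuf/types/known/durationpb" // registers the type
    )

    func main() {
        mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
        if err != nil {
            log.Fatal(err)
        }
        m := mt.New().Interface() // concrete generated message
        fmt.Println(m.ProtoReflect().Descriptor().FullName())

        // Or build a dynamic message from the descriptor alone.
        dm := dynamicpb.NewMessage(mt.Descriptor())
        fmt.Println(dm.Descriptor().FullName())
    }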
// This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,8 +153,9 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. @@ -172,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("<unknown:%d>", s) } @@ -436,7 +441,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +485,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f..7dcc2ff0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -180,6 +178,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +240,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +287,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b,
"deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +334,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +367,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +451,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +464,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +479,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +498,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) 
appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +543,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d..60ff62b4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. 
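These descriptor interfaces are plain, read-only views, so everything the doc comments promise can be probed directly. For instance, a map field (the IsMap contract is linkified just below) exposes its entry types through the field descriptor; a sketch using the well-known Struct type:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        md := (&structpb.Struct{}).ProtoReflect().Descriptor()
        fmt.Println(md.FullName()) // google.protobuf.Struct

        f := md.Fields().ByName("fields") // map<string, Value>
        fmt.Println(f.IsMap())                         // true
        fmt.Println(f.MapKey().Kind())                 // string
        fmt.Println(f.MapValue().Message().FullName()) // google.protobuf.Value
    }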
type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -532,7 +532,7 @@ type ExtensionType interface { // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. 
+// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -548,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b78..a7b0d06f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. 
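Before the Value documentation continues below, a short sketch of the accessor/mutator loop it describes, with Timestamp again standing in for any message; note that Set with a mistyped Value would panic, exactly as documented:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        ts := timestamppb.Now()
        m := ts.ProtoReflect()
        fd := m.Descriptor().Fields().ByName("seconds")

        // Get returns a Value keyed by the field descriptor...
        fmt.Println(m.Get(fd).Int())

        // ...and Set takes one; a mistyped Value would panic.
        m.Set(fd, protoreflect.ValueOfInt64(42))
        fmt.Println(ts.GetSeconds()) // 42
    }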
-// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 59165254..654599d4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. // -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73..16030973 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch.
-// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,7 +65,7 @@ import ( // // appending to the List here may or may not modify the message. // list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). 
+// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -392,13 +392,13 @@ // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 97% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22..b1fdbe3e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 00000000..43547011 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+ Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb55977..6267dc52 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. 
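To make the Files/Types split described above concrete, here is a minimal sketch (not part of this patch) of querying the two global registries; it assumes the descriptorpb package is linked in so that descriptor.proto is registered:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/reflect/protoregistry"
		_ "google.golang.org/protobuf/types/descriptorpb" // registers google/protobuf/descriptor.proto
	)

	func main() {
		// Files holds descriptors only, with no Go type information.
		fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(fd.Package())

		// Types additionally knows the generated Go type behind a descriptor.
		mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.FileDescriptorProto")
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(mt.New().Interface()) // a fresh, empty message of that type
	}
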
package protoregistry

@@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) {
// FindDescriptorByName looks up a descriptor by the full name.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
	if r == nil {
		return nil, NotFound
@@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) {
// FindFileByPath looks up a file by the path.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
// This returns an error if multiple files have the same path.
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
	if r == nil {
@@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec
// A compliant implementation must deterministically return the same type
// if no error is encountered.
//
-// The Types type implements this interface.
+// The [Types] type implements this interface.
type MessageTypeResolver interface {
	// FindMessageByName looks up a message by its full name.
	// E.g., "google.protobuf.Any"
@@ -451,7 +451,7 @@ type MessageTypeResolver interface {
// A compliant implementation must deterministically return the same type
// if no error is encountered.
//
-// The Types type implements this interface.
+// The [Types] type implements this interface.
type ExtensionTypeResolver interface {
	// FindExtensionByName looks up an extension field by the field's full name.
	// Note that this is the full name of the field as determined by
@@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac
// FindEnumByName looks up an enum by its full name.
// E.g., "google.protobuf.Field.Kind".
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
	if r == nil {
		return nil, NotFound
@@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp
// FindMessageByName looks up a message by its full name,
// e.g. "google.protobuf.Any".
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
	if r == nil {
		return nil, NotFound
@@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M
// FindMessageByURL looks up a message by a URL identifier.
// See documentation on google.protobuf.Any.type_url for the URL format.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
	// This function is similar to FindMessageByName but
	// truncates anything before and including '/' in the URL.
@@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
// where the extension is declared and is unrelated to the full name of the
// message being extended.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
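The (nil, [NotFound]) contract documented on the lookup methods above can be checked with errors.Is; a short, hypothetical illustration (the extension name below is made up):

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/protobuf/reflect/protoregistry"
	)

	func main() {
		// "example.not_a_real_extension" is a placeholder name for this sketch.
		_, err := protoregistry.GlobalTypes.FindExtensionByName("example.not_a_real_extension")
		if errors.Is(err, protoregistry.NotFound) {
			fmt.Println("extension is not registered")
		}
	}
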
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	if r == nil {
		return nil, NotFound
@@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E
// FindExtensionByNumber looks up an extension field by the field number
// within some parent message, identified by full name.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	if r == nil {
		return nil, NotFound
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 04c00f73..78624cf6 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -48,6 +48,103 @@ import (
	sync "sync"
)

+// The full set of known editions.
+type Edition int32
+
+const (
+	// A placeholder for an unknown edition value.
+	Edition_EDITION_UNKNOWN Edition = 0
+	// Legacy syntax "editions". These pre-date editions, but behave much like
+	// distinct editions. These can't be used to specify the edition of proto
+	// files, but feature definitions must supply proto2/proto3 defaults for
+	// backwards compatibility.
+	Edition_EDITION_PROTO2 Edition = 998
+	Edition_EDITION_PROTO3 Edition = 999
+	// Editions that have been released. The specific values are arbitrary and
+	// should not be depended on, but they will always be time-ordered for easy
+	// comparison.
+	Edition_EDITION_2023 Edition = 1000
+	Edition_EDITION_2024 Edition = 1001
+	// Placeholder editions for testing feature resolution. These should not be
+	// used or relied on outside of tests.
+	Edition_EDITION_1_TEST_ONLY     Edition = 1
+	Edition_EDITION_2_TEST_ONLY     Edition = 2
+	Edition_EDITION_99997_TEST_ONLY Edition = 99997
+	Edition_EDITION_99998_TEST_ONLY Edition = 99998
+	Edition_EDITION_99999_TEST_ONLY Edition = 99999
+	// Placeholder for specifying unbounded edition support. This should only
+	// ever be used by plugins that can expect to never require any changes to
+	// support a new edition.
+	Edition_EDITION_MAX Edition = 2147483647
+)
+
+// Enum value maps for Edition.
+var (
+	Edition_name = map[int32]string{
+		0:          "EDITION_UNKNOWN",
+		998:        "EDITION_PROTO2",
+		999:        "EDITION_PROTO3",
+		1000:       "EDITION_2023",
+		1001:       "EDITION_2024",
+		1:          "EDITION_1_TEST_ONLY",
+		2:          "EDITION_2_TEST_ONLY",
+		99997:      "EDITION_99997_TEST_ONLY",
+		99998:      "EDITION_99998_TEST_ONLY",
+		99999:      "EDITION_99999_TEST_ONLY",
+		2147483647: "EDITION_MAX",
+	}
+	Edition_value = map[string]int32{
+		"EDITION_UNKNOWN":         0,
+		"EDITION_PROTO2":          998,
+		"EDITION_PROTO3":          999,
+		"EDITION_2023":            1000,
+		"EDITION_2024":            1001,
+		"EDITION_1_TEST_ONLY":     1,
+		"EDITION_2_TEST_ONLY":     2,
+		"EDITION_99997_TEST_ONLY": 99997,
+		"EDITION_99998_TEST_ONLY": 99998,
+		"EDITION_99999_TEST_ONLY": 99999,
+		"EDITION_MAX":             2147483647,
+	}
+)
+
+func (x Edition) Enum() *Edition {
+	p := new(Edition)
+	*p = x
+	return p
+}
+
+func (x Edition) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Edition) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+}
+
+func (Edition) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[0]
+}
+
+func (x Edition) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Edition) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = Edition(num)
+	return nil
+}
+
+// Deprecated: Use Edition.Descriptor instead.
+func (Edition) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
+}
+
// The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32
@@ -80,11 +177,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
}

func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
}

func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[0]
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
}

func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -125,9 +222,10 @@ const (
	FieldDescriptorProto_TYPE_BOOL   FieldDescriptorProto_Type = 8
	FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
	// Tag-delimited aggregate.
-	// Group type is deprecated and not supported in proto3. However, Proto3
+	// Group type is deprecated and not supported after proto2. However, Proto3
	// implementations should still be able to parse the group wire format and
-	// treat group fields as unknown fields.
+	// treat group fields as unknown fields. In Editions, the group wire format
+	// can be enabled via the `message_encoding` feature.
	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate.
	// New in version 2.
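Because released Edition values are documented above to be time-ordered, plain integer comparisons suffice to range-check an edition. A small sketch, not part of this patch, using only API the patch introduces (the chosen editions are arbitrary examples):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		e := descriptorpb.Edition_EDITION_2023
		fmt.Println(e.String()) // "EDITION_2023"

		// Released editions are numerically time-ordered.
		if e >= descriptorpb.Edition_EDITION_2023 && e <= descriptorpb.Edition_EDITION_2024 {
			fmt.Println("edition is in the supported range")
		}

		// Enum() returns the pointer form used by optional descriptor fields,
		// such as FileDescriptorProto.Edition later in this patch.
		fdp := &descriptorpb.FileDescriptorProto{Edition: e.Enum()}
		fmt.Println(fdp.GetEdition())
	}
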
@@ -195,11 +293,11 @@ func (x FieldDescriptorProto_Type) String() string {
}

func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
}

func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
}

func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -226,21 +324,24 @@ type FieldDescriptorProto_Label int32

const (
	// 0 is reserved for errors
	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
-	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+	// The required label is only allowed in proto2. In proto3 and Editions
+	// it's explicitly prohibited. In Editions, the `field_presence` feature
+	// can be used to get this behavior.
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
)

// Enum value maps for FieldDescriptorProto_Label.
var (
	FieldDescriptorProto_Label_name = map[int32]string{
		1: "LABEL_OPTIONAL",
-		2: "LABEL_REQUIRED",
		3: "LABEL_REPEATED",
+		2: "LABEL_REQUIRED",
	}
	FieldDescriptorProto_Label_value = map[string]int32{
		"LABEL_OPTIONAL": 1,
-		"LABEL_REQUIRED": 2,
		"LABEL_REPEATED": 3,
+		"LABEL_REQUIRED": 2,
	}
)

@@ -255,11 +356,11 @@ func (x FieldDescriptorProto_Label) String() string {
}

func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
}

func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
}

func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -316,11 +417,11 @@ func (x FileOptions_OptimizeMode) String() string {
}

func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
}

func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
}

func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -382,11 +483,11 @@ func (x FieldOptions_CType) String() string {
}

func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
}

func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
}

func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -444,11 +545,11 @@ func (x FieldOptions_JSType) String() string {
}

func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
}

func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +607,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +691,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +753,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +779,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 2: "VERIFY", + 3: "NONE", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "VERIFY": 2, + "NONE": 3, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1174,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1197,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1280,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1400,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1537,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1600,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1186,12 +1652,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. 
// // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -1738,7 +2204,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1772,6 +2237,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1785,7 +2252,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -1893,13 +2359,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2006,10 +2472,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2030,6 +2492,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. 
Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2039,11 +2505,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. // - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2622,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2178,19 +2655,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. 
+ // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2205,11 +2674,12 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2790,23 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features } return nil } @@ -2348,6 +2824,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2862,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2894,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2964,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +2989,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. 
const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3044,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3071,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3119,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3152,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3210,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2770,28 +3303,193 @@ func (x *UninterpretedOption) GetNegativeIntValue() int64 { if x != nil && x.NegativeIntValue != nil { return *x.NegativeIntValue } - return 0 + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. 
The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. +type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. 
+func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue - } - return 0 +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} } -func (x *UninterpretedOption) GetStringValue() []byte { +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { if x != nil { - return x.StringValue + return x.Defaults } return nil } -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition } - return "" + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN } // Encapsulates information about the original source file from which a @@ -2855,7 +3553,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3566,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3579,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3605,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3618,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3631,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3654,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3667,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3719,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3732,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3776,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. 
@@ -3094,7 +3788,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3801,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3838,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3870,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3883,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3913,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. +} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +3985,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +3998,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4028,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3296,7 +4096,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. // For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -3388,7 +4188,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4201,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4214,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4275,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4288,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4301,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4350,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,250 +4388,250 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 
0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 
0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 
0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 
0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 
0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, @@ -3856,259 +4656,419 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, + 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, + 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, + 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, + 
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, + 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 
0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, + 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 
0x52, 0x4d, 0x41, + 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, + 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, + 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, + 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 
0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 
0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, + 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, + 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, + 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, + 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, + 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 
0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, + 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, + 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, + 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 
0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 
0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, + 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, + 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, + 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, + 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, + 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, + 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 
0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, + 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, + 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, + 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, + 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, + 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 
0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, + 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, + 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, + 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, + 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 
0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, + 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, + 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, + 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -4123,103 +5083,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return 
file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + 
(ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + 
(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: 
google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: 
google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4475,19 +5468,21 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -4499,7 +5494,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ -4511,7 +5506,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -4523,7 +5518,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -4535,7 +5530,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -4547,7 +5542,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4559,7 +5554,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4571,6 +5566,54 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5631,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 00000000..25de5ae0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,177 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflect/protodesc/proto/go_features.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "google.protobuf.go", + Tag: "bytes,1002,opt,name=go", + Filename: "reflect/protodesc/proto/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. 
+var ( + // optional google.protobuf.GoFeatures go = 1002; + E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] +) + +var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor + +var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, + 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, + 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, + 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, + 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, + 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once + file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc +) + +func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { + file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { + file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) + }) + return file_reflect_protodesc_proto_go_features_proto_rawDescData +} + +var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet + 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension 
type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_reflect_protodesc_proto_go_features_proto_init() } +func file_reflect_protodesc_proto_go_features_proto_init() { + if File_reflect_protodesc_proto_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, + DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, + MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, + ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + }.Build() + File_reflect_protodesc_proto_go_features_proto = out.File + file_reflect_protodesc_proto_go_features_proto_rawDesc = nil + file_reflect_protodesc_proto_go_features_proto_goTypes = nil + file_reflect_protodesc_proto_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto new file mode 100644 index 00000000..d2465712 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto @@ -0,0 +1,28 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +syntax = "proto2"; + +package google.protobuf; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/protobuf/types/gofeaturespb"; + +extend google.protobuf.FeatureSet { + optional GoFeatures go = 1002; +} + +message GoFeatures { + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + optional bool legacy_unmarshal_json_enum = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + edition_defaults = { edition: EDITION_PROTO2, value: "true" }, + edition_defaults = { edition: EDITION_PROTO3, value: "false" } + ]; +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f..9de51be5 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
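For context on the gofeaturespb package vendored above: it registers the first Go-specific editions feature as extension field 1002 on google.protobuf.FeatureSet, exposed through the generated E_Go descriptor. Below is a minimal sketch of writing and reading that extension with the proto package; the import alias and the feature value are illustrative assumptions, not part of the vendored code.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" // explicit alias, in case the generated package clause differs from the directory name
)

func main() {
	// Attach the Go-specific feature block to a FeatureSet via the
	// generated extension descriptor E_Go (field 1002).
	fs := &descriptorpb.FeatureSet{}
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
		LegacyUnmarshalJsonEnum: proto.Bool(true),
	})

	// Read it back through the same descriptor; GetExtension returns
	// interface{}, so assert to the concrete generated type.
	if proto.HasExtension(fs, gofeaturespb.E_Go) {
		g := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
		fmt.Println(g.GetLegacyUnmarshalJsonEnum()) // prints: true
	}
}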
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md b/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md new file mode 100644 index 00000000..8e6e9132 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md @@ -0,0 +1,84 @@ +# v4.0.1 + +## Fixed + + - An attacker could send a JWE containing compressed data that used large + amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. + Those functions now return an error if the decompressed data would exceed + 250kB or 10x the compressed size (whichever is larger). Thanks to + Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) + for reporting. + +# v4.0.0 + +This release makes some breaking changes in order to more thoroughly +address the vulnerabilities discussed in [Three New Attacks Against JSON Web +Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot +token". + +## Changed + + - Limit JWT encryption types (exclude password or public key types) (#78) + - Enforce minimum length for HMAC keys (#85) + - jwt: match any audience in a list, rather than requiring all audiences (#81) + - jwt: accept only Compact Serialization (#75) + - jws: Add expected algorithms for signatures (#74) + - Require specifying expected algorithms for ParseEncrypted, + ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, + jwt.ParseSignedAndEncrypted (#69, #74) + - Usually there is a small, known set of appropriate algorithms for a program + to use and it's a mistake to allow unexpected algorithms. For instance the + "billion hash attack" relies in part on programs accepting the PBES2 + encryption algorithm and doing the necessary work even if they weren't + specifically configured to allow PBES2. + - Revert "Strip padding off base64 strings" (#82) + - The specs require base64url encoding without padding. + - Minimum supported Go version is now 1.21 + +## Added + + - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. + - These allow parsing a specific serialization, as opposed to ParseSigned and + ParseEncrypted, which try to automatically detect which serialization was + provided. It's common to require a specific serialization for a specific + protocol - for instance JWT requires Compact serialization. + +[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v3.0.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. + +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + +# v3.0.1 + +## Fixed + + - Security issue: an attacker specifying a large "p2c" value can cause + JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large + amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the + disclosure and to Tom Tervoort for originally publishing the category of attack. + https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v2.6.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. 
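The v4.0.0 notes above hinge on one idea: the caller, not the token header, decides which algorithms are acceptable. Below is a minimal sketch of the v4-style call, assuming the ParseSigned(serialized, []jose.SignatureAlgorithm) signature the changelog describes; the token literal and nil key are placeholders.

package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

// verifyCompact parses a compact-serialized JWS while allow-listing RS256
// only; a header naming any other algorithm (e.g. "none" or a PBES2 variant)
// is rejected before any expensive work, which is the point of the change.
func verifyCompact(serialized string, key interface{}) ([]byte, error) {
	jws, err := jose.ParseSigned(serialized, []jose.SignatureAlgorithm{jose.RS256})
	if err != nil {
		return nil, err
	}
	return jws.Verify(key) // for RS256, key would be an *rsa.PublicKey
}

func main() {
	// "eyJhbGciOiJub25lIn0" is base64url for {"alg":"none"}; the allow-list
	// makes ParseSigned fail fast on it.
	if _, err := verifyCompact("eyJhbGciOiJub25lIn0..", nil); err != nil {
		fmt.Println("rejected:", err)
	}
}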
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md index 46b02d61..b877f412 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/README.md +++ b/vendor/gopkg.in/go-jose/go-jose.v2/README.md @@ -1,118 +1,4 @@ -# Go JOSE +# go-jose v2 -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=v2)](https://travis-ci.org/go-jose/go-jose) -[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/go-jose/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. This includes support for JSON Web Encryption, -JSON Web Signature, and JSON Web Token standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). -Tables of supported algorithms are shown below. The library supports both -the compact and full serialization formats, and has optional support for -multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 2](https://gopkg.in/go-jose/go-jose.v2) -([branch](https://github.com/go-jose/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current version: - - import "gopkg.in/go-jose/go-jose.v2" - -The old `v1` branch ([go-jose.v1](https://gopkg.in/go-jose/go-jose.v1)) will -still receive backported bug fixes and security fixes, but otherwise -development is frozen. All new feature development takes place on the `v2` -branch. Version 2 also contains additional sub-packages such as the -[jwt](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) implementation -contributed by [@shaxbee](https://github.com/shaxbee). - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. 
The Godoc reference has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 - -2. Only available in version 2 of the package - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which -allows attaching a key id. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) - AES, HMAC | []byte - -1. Only available in version 2 of the package - -## Examples - -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) - -Examples can be found in the Godoc -reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util) -subdirectory also contains a small command-line utility which might be useful -as an example. +Version 2 of this library is no longer supported. [Please use v4 +instead](https://pkg.go.dev/github.com/go-jose/go-jose/v4). diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go index 3ca79cc2..43f9ce2f 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go @@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm switch alg { case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. 
+ https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) case PS256, PS384, PS512: out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go index 73aab0fa..0ae2e5eb 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go @@ -406,6 +406,9 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { // Decrypt and validate the object and return the plaintext. Note that this // function does not support multi-recipient, if you desire multi-recipient // decryption use DecryptMulti instead. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { headers := obj.mergedHeaders(nil) @@ -470,6 +473,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // with support for multiple recipients. It returns the index of the recipient // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go index 40b688b3..636f6c8f 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go @@ -21,6 +21,7 @@ import ( "compress/flate" "encoding/base64" "encoding/binary" + "fmt" "io" "math/big" "strings" @@ -85,7 +86,7 @@ func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { } } -// Compress with DEFLATE +// deflate compresses the input. func deflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) @@ -97,15 +98,27 @@ func deflate(input []byte) ([]byte, error) { return output.Bytes(), err } -// Decompress with DEFLATE +// inflate decompresses the input. +// +// Errors if the decompressed data would be >250kB or >10x the size of the +// compressed data, whichever is larger.
func inflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) reader := flate.NewReader(bytes.NewBuffer(input)) - _, err := io.Copy(output, reader) - if err != nil { + maxCompressedSize := 10 * int64(len(input)) + if maxCompressedSize < 250000 { + maxCompressedSize = 250000 + } + + limit := maxCompressedSize + 1 + n, err := io.CopyN(output, reader, limit) + if err != nil && err != io.EOF { return nil, err } + if n == limit { + return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) + } err = reader.Close() return output.Bytes(), err diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go index 2b8076f5..52c8b62b 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go @@ -402,6 +402,11 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien if p2c <= 0 { return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer") } + if p2c > 1000000 { + // An unauthenticated attacker can set a high P2C value. Set an upper limit to avoid + // DoS attacks. + return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: too high") + } // salt is UTF8(Alg) || 0x00 || Salt Input alg := headers.getAlgorithm() diff --git a/vendor/modules.txt b/vendor/modules.txt index 97379b28..a62f4fa6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.12.0-rc.1 +# github.com/Microsoft/hcsshim v0.12.0-rc.2 ## explicit; go 1.18 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -51,16 +51,16 @@ github.com/VividCortex/ewma # github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d ## explicit github.com/acarl005/stripansi +# github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 +## explicit +github.com/aead/serpent # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator # github.com/chzyer/readline v1.5.1 ## explicit; go 1.15 github.com/chzyer/readline -# github.com/container-orchestrated-devices/container-device-interface v0.6.1 -## explicit; go 1.17 -github.com/container-orchestrated-devices/container-device-interface/pkg/parser -# github.com/containerd/cgroups/v3 v3.0.2 +# github.com/containerd/cgroups/v3 v3.0.3 ## explicit; go 1.18 github.com/containerd/cgroups/v3/cgroup1/stats # github.com/containerd/containerd v1.7.11 @@ -76,6 +76,9 @@ github.com/containerd/log ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil +# github.com/containerd/typeurl/v2 v2.1.1 +## explicit; go 1.13 +github.com/containerd/typeurl/v2 # github.com/containernetworking/cni v1.1.2 ## explicit; go 1.14 github.com/containernetworking/cni/libcni @@ -88,11 +91,11 @@ github.com/containernetworking/cni/pkg/types/create github.com/containernetworking/cni/pkg/types/internal github.com/containernetworking/cni/pkg/utils github.com/containernetworking/cni/pkg/version -# github.com/containernetworking/plugins v1.3.0 +# github.com/containernetworking/plugins v1.4.0 ## explicit; go 1.20 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.30.0 -## explicit; go 1.18 +# github.com/containers/buildah v1.33.7 +## explicit; go 1.20 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 97379b28..a62f4fa6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket
 github.com/Microsoft/go-winio/internal/stringbuffer
 github.com/Microsoft/go-winio/pkg/guid
 github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.12.0-rc.1
+# github.com/Microsoft/hcsshim v0.12.0-rc.2
 ## explicit; go 1.18
 github.com/Microsoft/hcsshim
 github.com/Microsoft/hcsshim/computestorage
@@ -51,16 +51,16 @@ github.com/VividCortex/ewma
 # github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
 ## explicit
 github.com/acarl005/stripansi
+# github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6
+## explicit
+github.com/aead/serpent
 # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
 ## explicit; go 1.13
 github.com/asaskevich/govalidator
 # github.com/chzyer/readline v1.5.1
 ## explicit; go 1.15
 github.com/chzyer/readline
-# github.com/container-orchestrated-devices/container-device-interface v0.6.1
-## explicit; go 1.17
-github.com/container-orchestrated-devices/container-device-interface/pkg/parser
-# github.com/containerd/cgroups/v3 v3.0.2
+# github.com/containerd/cgroups/v3 v3.0.3
 ## explicit; go 1.18
 github.com/containerd/cgroups/v3/cgroup1/stats
 # github.com/containerd/containerd v1.7.11
@@ -76,6 +76,9 @@ github.com/containerd/log
 ## explicit; go 1.19
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
+# github.com/containerd/typeurl/v2 v2.1.1
+## explicit; go 1.13
+github.com/containerd/typeurl/v2
 # github.com/containernetworking/cni v1.1.2
 ## explicit; go 1.14
 github.com/containernetworking/cni/libcni
@@ -88,11 +91,11 @@ github.com/containernetworking/cni/pkg/types/create
 github.com/containernetworking/cni/pkg/types/internal
 github.com/containernetworking/cni/pkg/utils
 github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v1.3.0
+# github.com/containernetworking/plugins v1.4.0
 ## explicit; go 1.20
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.30.0
-## explicit; go 1.18
+# github.com/containers/buildah v1.33.7
+## explicit; go 1.20
 github.com/containers/buildah
 github.com/containers/buildah/bind
 github.com/containers/buildah/chroot
@@ -101,8 +104,13 @@ github.com/containers/buildah/define
 github.com/containers/buildah/docker
 github.com/containers/buildah/imagebuildah
 github.com/containers/buildah/internal
+github.com/containers/buildah/internal/config
+github.com/containers/buildah/internal/mkcw
+github.com/containers/buildah/internal/mkcw/types
 github.com/containers/buildah/internal/parse
+github.com/containers/buildah/internal/tmpdir
 github.com/containers/buildah/internal/util
+github.com/containers/buildah/internal/volumes
 github.com/containers/buildah/pkg/blobcache
 github.com/containers/buildah/pkg/chrootuser
 github.com/containers/buildah/pkg/jail
@@ -112,25 +120,31 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.56.0
+# github.com/containers/common v0.57.4
 ## explicit; go 1.18
+github.com/containers/common/internal/attributedstring
 github.com/containers/common/libimage
 github.com/containers/common/libimage/define
 github.com/containers/common/libimage/filter
 github.com/containers/common/libimage/manifests
+github.com/containers/common/libimage/platform
 github.com/containers/common/libnetwork/cni
 github.com/containers/common/libnetwork/etchosts
 github.com/containers/common/libnetwork/internal/util
 github.com/containers/common/libnetwork/netavark
 github.com/containers/common/libnetwork/network
+github.com/containers/common/libnetwork/pasta
 github.com/containers/common/libnetwork/resolvconf
+github.com/containers/common/libnetwork/slirp4netns
 github.com/containers/common/libnetwork/types
 github.com/containers/common/libnetwork/util
 github.com/containers/common/pkg/apparmor
 github.com/containers/common/pkg/apparmor/internal/supported
+github.com/containers/common/pkg/auth
 github.com/containers/common/pkg/capabilities
 github.com/containers/common/pkg/cgroupv2
 github.com/containers/common/pkg/chown
+github.com/containers/common/pkg/completion
 github.com/containers/common/pkg/config
 github.com/containers/common/pkg/download
 github.com/containers/common/pkg/filters
@@ -141,16 +155,20 @@ github.com/containers/common/pkg/hooks/exec
 github.com/containers/common/pkg/machine
 github.com/containers/common/pkg/manifests
 github.com/containers/common/pkg/parse
+github.com/containers/common/pkg/password
 github.com/containers/common/pkg/retry
+github.com/containers/common/pkg/rootlessport
 github.com/containers/common/pkg/seccomp
+github.com/containers/common/pkg/servicereaper
 github.com/containers/common/pkg/signal
 github.com/containers/common/pkg/subscriptions
 github.com/containers/common/pkg/supplemented
 github.com/containers/common/pkg/timetype
 github.com/containers/common/pkg/umask
 github.com/containers/common/pkg/util
+github.com/containers/common/pkg/version
 github.com/containers/common/version
-# github.com/containers/image/v5 v5.28.0
+# github.com/containers/image/v5 v5.29.2
 ## explicit; go 1.19
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -220,15 +238,16 @@ github.com/containers/image/v5/version
 # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
 ## explicit
 github.com/containers/libtrust
+# github.com/containers/luksy v0.0.0-20231127213545-c2b9b9dbf004
+## explicit; go 1.20
+github.com/containers/luksy
 # github.com/containers/ocicrypt v1.1.9
 ## explicit; go 1.20
 github.com/containers/ocicrypt
 github.com/containers/ocicrypt/blockcipher
 github.com/containers/ocicrypt/config
 github.com/containers/ocicrypt/config/keyprovider-config
-github.com/containers/ocicrypt/config/pkcs11config
 github.com/containers/ocicrypt/crypto/pkcs11
-github.com/containers/ocicrypt/helpers
 github.com/containers/ocicrypt/keywrap
 github.com/containers/ocicrypt/keywrap/jwe
 github.com/containers/ocicrypt/keywrap/keyprovider
@@ -238,7 +257,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7
 github.com/containers/ocicrypt/spec
 github.com/containers/ocicrypt/utils
 github.com/containers/ocicrypt/utils/keyprovider
-# github.com/containers/storage v1.50.2
+# github.com/containers/storage v1.51.0
 ## explicit; go 1.19
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -257,6 +276,7 @@ github.com/containers/storage/pkg/archive
 github.com/containers/storage/pkg/chrootarchive
 github.com/containers/storage/pkg/chunked
 github.com/containers/storage/pkg/chunked/compressor
+github.com/containers/storage/pkg/chunked/dump
 github.com/containers/storage/pkg/chunked/internal
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/devicemapper
@@ -289,7 +309,7 @@ github.com/containers/storage/types
 # github.com/coreos/go-systemd/v22 v22.5.0
 ## explicit; go 1.12
 github.com/coreos/go-systemd/v22/dbus
-# github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46
+# github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f
 ## explicit
 github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
 # github.com/cyphar/filepath-securejoin v0.2.4
@@ -310,7 +330,7 @@ github.com/docker/distribution/reference
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.7+incompatible
+# github.com/docker/docker v24.0.9+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -345,8 +365,8 @@ github.com/docker/docker/pkg/system
 ## explicit; go 1.19
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
-# github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
-## explicit; go 1.13
+# github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea
+## explicit; go 1.18
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
@@ -356,16 +376,16 @@ github.com/docker/go-units
 # github.com/fsnotify/fsnotify v1.7.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.10.0
+# github.com/fsouza/go-dockerclient v1.10.1
 ## explicit; go 1.20
 github.com/fsouza/go-dockerclient
-# github.com/go-jose/go-jose/v3 v3.0.1
+# github.com/go-jose/go-jose/v3 v3.0.3
 ## explicit; go 1.12
 github.com/go-jose/go-jose/v3
 github.com/go-jose/go-jose/v3/cipher
 github.com/go-jose/go-jose/v3/json
-# github.com/go-openapi/analysis v0.21.4
-## explicit; go 1.13
+# github.com/go-openapi/analysis v0.22.1
+## explicit; go 1.19
 github.com/go-openapi/analysis
 github.com/go-openapi/analysis/internal/debug
 github.com/go-openapi/analysis/internal/flatten/normalize
@@ -373,33 +393,33 @@ github.com/go-openapi/analysis/internal/flatten/operations
 github.com/go-openapi/analysis/internal/flatten/replace
 github.com/go-openapi/analysis/internal/flatten/schutils
 github.com/go-openapi/analysis/internal/flatten/sortref
-# github.com/go-openapi/errors v0.20.4
-## explicit; go 1.14
+# github.com/go-openapi/errors v0.21.0
+## explicit; go 1.19
 github.com/go-openapi/errors
-# github.com/go-openapi/jsonpointer v0.20.0
-## explicit; go 1.18
+# github.com/go-openapi/jsonpointer v0.20.2
+## explicit; go 1.19
 github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.20.2
-## explicit; go 1.13
+# github.com/go-openapi/jsonreference v0.20.4
+## explicit; go 1.19
 github.com/go-openapi/jsonreference
 github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/loads v0.21.2
-## explicit; go 1.13
+# github.com/go-openapi/loads v0.21.5
+## explicit; go 1.19
 github.com/go-openapi/loads
-# github.com/go-openapi/runtime v0.26.0
-## explicit; go 1.18
+# github.com/go-openapi/runtime v0.26.2
+## explicit; go 1.19
 github.com/go-openapi/runtime
-# github.com/go-openapi/spec v0.20.9
-## explicit; go 1.13
+# github.com/go-openapi/spec v0.20.13
+## explicit; go 1.19
 github.com/go-openapi/spec
-# github.com/go-openapi/strfmt v0.21.7
+# github.com/go-openapi/strfmt v0.22.0
 ## explicit; go 1.19
 github.com/go-openapi/strfmt
-# github.com/go-openapi/swag v0.22.4
-## explicit; go 1.18
+# github.com/go-openapi/swag v0.22.7
+## explicit; go 1.19
 github.com/go-openapi/swag
-# github.com/go-openapi/validate v0.22.1
-## explicit; go 1.14
+# github.com/go-openapi/validate v0.22.6
+## explicit; go 1.19
 github.com/go-openapi/validate
 # github.com/godbus/dbus/v5 v5.1.0
 ## explicit; go 1.12
@@ -418,7 +438,7 @@ github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-containerregistry v0.16.1
+# github.com/google/go-containerregistry v0.17.0
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/name
 github.com/google/go-containerregistry/pkg/v1
@@ -426,11 +446,11 @@ github.com/google/go-containerregistry/pkg/v1/types
 # github.com/google/go-intervals v0.0.2
 ## explicit; go 1.12
 github.com/google/go-intervals/intervalset
-# github.com/google/uuid v1.4.0
+# github.com/google/uuid v1.5.0
 ## explicit
 github.com/google/uuid
-# github.com/gorilla/mux v1.8.0
-## explicit; go 1.12
+# github.com/gorilla/mux v1.8.1
+## explicit; go 1.20
 github.com/gorilla/mux
 # github.com/hashicorp/errwrap v1.1.0
 ## explicit
@@ -450,8 +470,8 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.2
-## explicit; go 1.18
+# github.com/klauspost/compress v1.17.4
+## explicit; go 1.19
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -463,7 +483,7 @@ github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/klauspost/pgzip v1.2.6
 ## explicit
 github.com/klauspost/pgzip
-# github.com/letsencrypt/boulder v0.0.0-20231103135357-6d8e6e74f818
+# github.com/letsencrypt/boulder v0.0.0-20240104140712-c1f7de06e9f8
 ## explicit; go 1.21
 github.com/letsencrypt/boulder/core
 github.com/letsencrypt/boulder/goodkey
@@ -487,7 +507,7 @@ github.com/mattn/go-runewidth
 # github.com/mattn/go-shellwords v1.0.12
 ## explicit; go 1.13
 github.com/mattn/go-shellwords
-# github.com/mattn/go-sqlite3 v1.14.18
+# github.com/mattn/go-sqlite3 v1.14.19
 ## explicit; go 1.16
 github.com/mattn/go-sqlite3
 # github.com/miekg/pkcs11 v1.1.1
@@ -499,15 +519,24 @@ github.com/mistifyio/go-zfs/v3
 # github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
 github.com/mitchellh/mapstructure
+# github.com/moby/buildkit v0.12.5
+## explicit; go 1.20
+github.com/moby/buildkit/frontend/dockerfile/command
+github.com/moby/buildkit/frontend/dockerfile/parser
+github.com/moby/buildkit/frontend/dockerfile/shell
+github.com/moby/buildkit/util/stack
 # github.com/moby/patternmatcher v0.6.0
 ## explicit; go 1.19
 github.com/moby/patternmatcher
-# github.com/moby/sys/mountinfo v0.6.2
+# github.com/moby/sys/mountinfo v0.7.1
 ## explicit; go 1.16
 github.com/moby/sys/mountinfo
 # github.com/moby/sys/sequential v0.5.0
 ## explicit; go 1.17
 github.com/moby/sys/sequential
+# github.com/moby/sys/user v0.1.0
+## explicit; go 1.17
+github.com/moby/sys/user
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
@@ -532,18 +561,18 @@ github.com/opencontainers/go-digest/digestset
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.12
-## explicit; go 1.17
+# github.com/opencontainers/runc v1.2.0-rc.1
+## explicit; go 1.20
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/configs
 github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
 github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.1.0
+# github.com/opencontainers/runtime-spec v1.2.0
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69
+# github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
 ## explicit; go 1.19
 github.com/opencontainers/runtime-tools/generate
 github.com/opencontainers/runtime-tools/generate/seccomp
@@ -554,7 +583,7 @@ github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.5
+# github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510
 ## explicit; go 1.19
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/command
@@ -580,16 +609,16 @@ github.com/rivo/uniseg
 # github.com/seccomp/libseccomp-golang v0.10.0
 ## explicit; go 1.14
 github.com/seccomp/libseccomp-golang
-# github.com/secure-systems-lab/go-securesystemslib v0.7.0
+# github.com/secure-systems-lab/go-securesystemslib v0.8.0
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/encrypted
 # github.com/sigstore/fulcio v1.4.3
 ## explicit; go 1.20
 github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.3.3
+# github.com/sigstore/rekor v1.3.4
 ## explicit; go 1.21
 github.com/sigstore/rekor/pkg/generated/models
-# github.com/sigstore/sigstore v1.7.5
+# github.com/sigstore/sigstore v1.8.0
 ## explicit; go 1.20
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/signature
@@ -598,7 +627,7 @@ github.com/sigstore/sigstore/pkg/signature/payload
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
-# github.com/spf13/cobra v1.7.0
+# github.com/spf13/cobra v1.8.0
 ## explicit; go 1.15
 github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
@@ -607,7 +636,7 @@ github.com/spf13/pflag
 # github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
 ## explicit; go 1.19
 github.com/stefanberger/go-pkcs11uri
-# github.com/sylabs/sif/v2 v2.15.0
+# github.com/sylabs/sif/v2 v2.15.1
 ## explicit; go 1.20
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@@ -625,15 +654,15 @@ github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
 github.com/ulikunitz/xz/lzma
-# github.com/vanilla-os/prometheus v0.1.6
-## explicit; go 1.19
+# github.com/vanilla-os/prometheus v1.0.0
+## explicit; go 1.21
 github.com/vanilla-os/prometheus
 # github.com/vbatts/tar-split v0.11.5
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.6.2
+# github.com/vbauerster/mpb/v8 v8.7.1
 ## explicit; go 1.17
 github.com/vbauerster/mpb/v8
 github.com/vbauerster/mpb/v8/cwriter
@@ -649,7 +678,7 @@ github.com/vishvananda/netns
 # go.etcd.io/bbolt v1.3.8
 ## explicit; go 1.17
 go.etcd.io/bbolt
-# go.mongodb.org/mongo-driver v1.12.1
+# go.mongodb.org/mongo-driver v1.13.1
 ## explicit; go 1.13
 go.mongodb.org/mongo-driver/bson
 go.mongodb.org/mongo-driver/bson/bsoncodec
@@ -668,8 +697,10 @@ go.opencensus.io/internal
 go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.17.0
+# golang.org/x/crypto v0.21.0
 ## explicit; go 1.18
+golang.org/x/crypto/argon2
+golang.org/x/crypto/blake2b
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
 golang.org/x/crypto/chacha20
@@ -687,13 +718,16 @@ golang.org/x/crypto/openpgp/errors
 golang.org/x/crypto/openpgp/packet
 golang.org/x/crypto/openpgp/s2k
 golang.org/x/crypto/pbkdf2
+golang.org/x/crypto/ripemd160
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/scrypt
 golang.org/x/crypto/sha3
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+golang.org/x/crypto/twofish
+golang.org/x/crypto/xts
+# golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
@@ -701,27 +735,26 @@ golang.org/x/exp/slices
 # golang.org/x/mod v0.14.0
 ## explicit; go 1.18
 golang.org/x/mod/semver
-# golang.org/x/net v0.17.0
-## explicit; go 1.17
+# golang.org/x/net v0.23.0
+## explicit; go 1.18
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/sync v0.5.0
+# golang.org/x/sync v0.6.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.15.0
+# golang.org/x/sys v0.18.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
-golang.org/x/sys/execabs
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.15.0
+# golang.org/x/term v0.18.0
 ## explicit; go 1.18
 golang.org/x/term
 # golang.org/x/text v0.14.0
@@ -730,7 +763,7 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.14.0
+# golang.org/x/tools v0.16.1
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/gcexportdata
@@ -749,10 +782,11 @@ golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/tokeninternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405
+golang.org/x/tools/internal/versions
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.59.0
+# google.golang.org/grpc v1.60.1
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -788,6 +822,7 @@ google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
@@ -799,18 +834,20 @@ google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
+google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.31.0
-## explicit; go 1.11
+# google.golang.org/protobuf v1.33.0
+## explicit; go 1.17
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
 google.golang.org/protobuf/internal/descopts
 google.golang.org/protobuf/internal/detrand
+google.golang.org/protobuf/internal/editiondefaults
 google.golang.org/protobuf/internal/encoding/defval
 google.golang.org/protobuf/internal/encoding/json
 google.golang.org/protobuf/internal/encoding/messageset
@@ -834,10 +871,11 @@ google.golang.org/protobuf/reflect/protoregistry
 google.golang.org/protobuf/runtime/protoiface
 google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/gofeaturespb
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/timestamppb
-# gopkg.in/go-jose/go-jose.v2 v2.6.1
+# gopkg.in/go-jose/go-jose.v2 v2.6.3
 ## explicit
 gopkg.in/go-jose/go-jose.v2
 gopkg.in/go-jose/go-jose.v2/cipher
@@ -845,3 +883,6 @@ gopkg.in/go-jose/go-jose.v2/json
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
+# tags.cncf.io/container-device-interface v0.6.2
+## explicit; go 1.19
+tags.cncf.io/container-device-interface/pkg/parser
diff --git a/vendor/tags.cncf.io/container-device-interface/LICENSE b/vendor/tags.cncf.io/container-device-interface/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/tags.cncf.io/container-device-interface/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/parser/parser.go b/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go
similarity index 100%
rename from vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/parser/parser.go
rename to vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go