From 44198da530cdff50b1ff5cbde101d83b0599cdcb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 26 Oct 2023 07:11:49 +0000
Subject: [PATCH] Bump github.com/hashicorp/nomad from 1.4.4 to 1.4.6

Bumps [github.com/hashicorp/nomad](https://github.com/hashicorp/nomad) from 1.4.4 to 1.4.6.
- [Release notes](https://github.com/hashicorp/nomad/releases)
- [Changelog](https://github.com/hashicorp/nomad/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/nomad/compare/v1.4.4...v1.4.6)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/nomad
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
---
 go.mod | 39 +-
 go.sum | 98 +-
 .../Masterminds/sprig/v3/CHANGELOG.md | 6 +
 .../coreos/go-systemd/v22/dbus/dbus.go | 5 +
 .../coreos/go-systemd/v22/dbus/methods.go | 34 +
 vendor/github.com/docker/cli/AUTHORS | 101 +-
 .../docker/cli/cli/config/config.go | 20 +-
 .../docker/cli/cli/config/configfile/file.go | 87 +-
 .../cli/cli/config/configfile/file_unix.go | 2 +-
 .../cli/cli/config/credentials/file_store.go | 5 +-
 vendor/github.com/google/go-cmp/LICENSE | 27 +
 .../github.com/google/go-cmp/cmp/compare.go | 669 +++++++++
 .../google/go-cmp/cmp/export_panic.go | 16 +
 .../google/go-cmp/cmp/export_unsafe.go | 36 +
 .../go-cmp/cmp/internal/diff/debug_disable.go | 18 +
 .../go-cmp/cmp/internal/diff/debug_enable.go | 123 ++
 .../google/go-cmp/cmp/internal/diff/diff.go | 402 ++++++
 .../google/go-cmp/cmp/internal/flags/flags.go | 9 +
 .../go-cmp/cmp/internal/function/func.go | 99 ++
 .../google/go-cmp/cmp/internal/value/name.go | 164 +++
 .../cmp/internal/value/pointer_purego.go | 34 +
 .../cmp/internal/value/pointer_unsafe.go | 37 +
 .../google/go-cmp/cmp/internal/value/sort.go | 106 ++
 .../github.com/google/go-cmp/cmp/options.go | 554 ++++++++
 vendor/github.com/google/go-cmp/cmp/path.go | 380 +++++
 vendor/github.com/google/go-cmp/cmp/report.go | 54 +
 .../google/go-cmp/cmp/report_compare.go | 433 ++++++
 .../google/go-cmp/cmp/report_references.go | 264 ++++
 .../google/go-cmp/cmp/report_reflect.go | 414 ++++++
 .../google/go-cmp/cmp/report_slices.go | 614 ++++++++
 .../google/go-cmp/cmp/report_text.go | 432 ++++++
 .../google/go-cmp/cmp/report_value.go | 121 ++
 .../hashicorp/go-plugin/CHANGELOG.md | 41 +
 vendor/github.com/hashicorp/go-plugin/LICENSE | 2 +
 .../github.com/hashicorp/go-plugin/README.md | 5 +-
 .../github.com/hashicorp/go-plugin/client.go | 43 +-
 .../hashicorp/go-plugin/discover.go | 3 +
 .../github.com/hashicorp/go-plugin/error.go | 3 +
 .../hashicorp/go-plugin/grpc_broker.go | 3 +
 .../hashicorp/go-plugin/grpc_client.go | 3 +
 .../hashicorp/go-plugin/grpc_controller.go | 3 +
 .../hashicorp/go-plugin/grpc_server.go | 19 +-
 .../hashicorp/go-plugin/grpc_stdio.go | 3 +
 .../go-plugin/internal/plugin/gen.go | 3 +
 .../internal/plugin/grpc_broker.proto | 3 +
 .../internal/plugin/grpc_controller.proto | 3 +
 .../internal/plugin/grpc_stdio.proto | 3 +
 .../hashicorp/go-plugin/log_entry.go | 3 +
 vendor/github.com/hashicorp/go-plugin/mtls.go | 3 +
 .../hashicorp/go-plugin/mux_broker.go | 3 +
 .../hashicorp/go-plugin/notes_unix.go | 67 +
 .../hashicorp/go-plugin/notes_windows.go | 43 +
 .../github.com/hashicorp/go-plugin/plugin.go | 3 +
 .../github.com/hashicorp/go-plugin/process.go | 3 +
 .../hashicorp/go-plugin/process_posix.go | 4 +
 .../hashicorp/go-plugin/process_windows.go | 3 +
 .../hashicorp/go-plugin/protocol.go | 3 +
 .../hashicorp/go-plugin/rpc_client.go | 3 +
 .../hashicorp/go-plugin/rpc_server.go | 22 +-
.../github.com/hashicorp/go-plugin/server.go | 9 +- .../hashicorp/go-plugin/server_mux.go | 3 + .../github.com/hashicorp/go-plugin/stream.go | 3 + .../github.com/hashicorp/go-plugin/testing.go | 3 + .../go-secure-stdlib/tlsutil/tlsutil.go | 52 +- vendor/github.com/hashicorp/go-set/LICENSE | 2 + vendor/github.com/hashicorp/go-set/README.md | 5 +- vendor/github.com/hashicorp/go-set/hashset.go | 29 +- vendor/github.com/hashicorp/go-set/set.go | 31 +- vendor/github.com/hashicorp/nomad/acl/acl.go | 33 +- .../github.com/hashicorp/nomad/acl/policy.go | 6 +- vendor/github.com/hashicorp/nomad/ci/ports.go | 20 + .../nomad/client/allocdir/alloc_dir.go | 9 +- .../nomad/client/allocdir/task_dir.go | 9 +- .../nomad/client/allocdir/testing.go | 3 +- .../hashicorp/nomad/client/config/testing.go | 3 +- .../nomad/client/dynamicplugins/registry.go | 12 +- .../nomad/client/lib/cgutil/cgutil_linux.go | 14 +- .../client/lib/cgutil/cpuset_manager_v1.go | 3 +- .../nomad/client/logmon/logging/rotator.go | 5 +- .../pluginmanager/csimanager/manager.go | 2 +- .../serviceregistration/checks/client.go | 9 +- .../service_registration.go | 20 +- .../client/serviceregistration/workload.go | 4 +- .../nomad/client/taskenv/services.go | 68 +- .../nomad/command/agent/host/host.go | 4 +- .../nomad/helper/freeport/ephemeral_darwin.go | 47 - .../helper/freeport/ephemeral_freebsd.go | 47 - .../nomad/helper/freeport/ephemeral_linux.go | 42 - .../helper/freeport/ephemeral_windows.go | 12 - .../nomad/helper/freeport/freeport.go | 297 ---- .../hashicorp/nomad/helper/tlsutil/config.go | 498 +++++++ .../nomad/helper/tlsutil/generate.go | 313 +++++ .../hashicorp/nomad/nomad/mock/alloc.go | 1 + .../hashicorp/nomad/nomad/structs/acl.go | 52 + .../hashicorp/nomad/nomad/structs/bitmap.go | 28 +- .../nomad/nomad/structs/config/artifact.go | 2 +- .../nomad/nomad/structs/config/consul.go | 8 + .../hashicorp/nomad/nomad/structs/csi.go | 4 +- .../hashicorp/nomad/nomad/structs/network.go | 56 +- .../hashicorp/nomad/nomad/structs/operator.go | 2 +- .../hashicorp/nomad/nomad/structs/services.go | 49 +- .../nomad/nomad/structs/structs.generated.go | 620 +++++---- .../hashicorp/nomad/nomad/structs/structs.go | 97 +- .../hashicorp/nomad/nomad/structs/uuid.go | 4 +- .../hashicorp/nomad/nomad/structs/volumes.go | 12 +- .../hashicorp/nomad/plugins/drivers/driver.go | 2 +- .../nomad/plugins/drivers/proto/driver.pb.go | 2 +- .../nomad/plugins/drivers/proto/driver.proto | 2 +- .../plugins/drivers/testutils/exec_testing.go | 3 +- .../plugins/drivers/testutils/testing.go | 3 +- .../hashicorp/nomad/testutil/server.go | 23 +- .../hashicorp/nomad/testutil/tls.go | 49 + .../hashicorp/nomad/testutil/vault.go | 140 +- .../hashicorp/nomad/testutil/wait.go | 63 +- .../hashicorp/nomad/version/version.go | 2 +- vendor/github.com/hashicorp/vault/api/LICENSE | 2 + .../github.com/hashicorp/vault/api/logical.go | 53 +- vendor/github.com/hashicorp/vault/sdk/LICENSE | 2 + .../vault/sdk/helper/certutil/helpers.go | 74 +- .../vault/sdk/helper/certutil/types.go | 12 +- .../sdk/helper/pluginutil/multiplexing.go | 7 +- .../sdk/helper/pluginutil/multiplexing.pb.go | 2 +- .../hashicorp/vault/sdk/logical/error.go | 5 + .../vault/sdk/logical/identity.pb.go | 2 +- .../hashicorp/vault/sdk/logical/plugin.pb.go | 2 +- .../hashicorp/vault/sdk/logical/response.go | 3 +- .../vault/sdk/logical/response_util.go | 22 + .../hashicorp/vault/sdk/logical/version.pb.go | 2 +- .../hashicorp/vault/sdk/physical/cache.go | 1 - .../vault/sdk/version/version_base.go | 2 +- 
.../rogpeppe/go-internal/fmtsort/mapelem.go | 15 +- .../go-internal/fmtsort/mapelem_1.11.go | 7 +- .../shirou/gopsutil/v3/cpu/cpu_linux.go | 76 + .../shirou/gopsutil/v3/cpu/cpu_windows.go | 14 +- .../shirou/gopsutil/v3/disk/disk_aix_nocgo.go | 70 +- .../shirou/gopsutil/v3/disk/disk_darwin.go | 8 +- .../gopsutil/v3/disk/disk_darwin_cgo.go | 4 +- .../gopsutil/v3/disk/disk_darwin_nocgo.go | 4 +- .../shirou/gopsutil/v3/disk/disk_solaris.go | 119 +- .../shirou/gopsutil/v3/disk/disk_unix.go | 23 +- .../shirou/gopsutil/v3/disk/disk_windows.go | 131 +- .../shirou/gopsutil/v3/host/host.go | 4 + .../shirou/gopsutil/v3/host/host_linux.go | 4 +- .../gopsutil/v3/host/host_linux_ppc64.go | 48 + .../shirou/gopsutil/v3/host/host_posix.go | 11 +- .../shirou/gopsutil/v3/host/host_windows.go | 26 +- .../gopsutil/v3/internal/common/common.go | 14 +- .../v3/internal/common/common_linux.go | 20 + .../types.go => internal/common/warnings.go} | 16 +- .../shirou/gopsutil/v3/mem/mem_solaris.go | 27 + .../shirou/gopsutil/v3/net/net_fallback.go | 4 +- .../shirou/gopsutil/v3/net/net_linux.go | 2 +- .../shirou/gopsutil/v3/net/net_solaris.go | 143 ++ .../shirou/gopsutil/v3/net/net_unix.go | 2 +- .../gopsutil/v3/process/process_darwin_cgo.go | 3 + .../gopsutil/v3/process/process_windows.go | 30 +- vendor/github.com/shoenig/test/LICENSE | 363 +++++ .../shoenig/test/interfaces/interfaces.go | 68 + .../test/internal/assertions/assertions.go | 1229 +++++++++++++++++ .../test/internal/brokenfs/fs_default.go | 7 + .../test/internal/brokenfs/fs_windows.go | 17 + .../test/internal/constraints/constraints.go | 50 + vendor/github.com/shoenig/test/must/assert.go | 12 + .../shoenig/test/must/fs_default.go | 9 + .../shoenig/test/must/fs_windows.go | 19 + .../shoenig/test/must/invocations.go | 27 + vendor/github.com/shoenig/test/must/must.go | 717 ++++++++++ .../github.com/shoenig/test/must/scripts.go | 95 ++ .../github.com/shoenig/test/must/settings.go | 49 + .../github.com/shoenig/test/portal/portal.go | 118 ++ .../shoenig/test/portal/portal_default.go | 29 + .../shoenig/test/portal/portal_windows.go | 12 + vendor/github.com/shoenig/test/wait/wait.go | 431 ++++++ .../tklauser/go-sysconf/.cirrus.yml | 21 +- vendor/github.com/tklauser/go-sysconf/LICENSE | 2 +- .../zsysconf_values_freebsd_riscv64.go | 12 + .../github.com/tklauser/numcpus/.cirrus.yml | 11 +- .../zclconf/go-cty/cty/convert/conversion.go | 4 +- .../cty/convert/conversion_collection.go | 81 +- .../go-cty/cty/convert/conversion_dynamic.go | 104 ++ .../go-cty/cty/convert/conversion_object.go | 8 +- .../zclconf/go-cty/cty/convert/public.go | 2 +- .../zclconf/go-cty/cty/function/argument.go | 3 + .../zclconf/go-cty/cty/function/function.go | 62 + .../go-cty/cty/function/stdlib/bool.go | 3 + .../go-cty/cty/function/stdlib/bytes.go | 2 + .../go-cty/cty/function/stdlib/collection.go | 31 +- .../go-cty/cty/function/stdlib/conversion.go | 2 + .../zclconf/go-cty/cty/function/stdlib/csv.go | 1 + .../go-cty/cty/function/stdlib/datetime.go | 2 + .../go-cty/cty/function/stdlib/format.go | 2 + .../go-cty/cty/function/stdlib/general.go | 5 +- .../go-cty/cty/function/stdlib/json.go | 2 + .../go-cty/cty/function/stdlib/number.go | 24 +- .../go-cty/cty/function/stdlib/regexp.go | 2 + .../go-cty/cty/function/stdlib/sequence.go | 4 +- .../zclconf/go-cty/cty/function/stdlib/set.go | 5 + .../go-cty/cty/function/stdlib/string.go | 81 +- .../cty/function/stdlib/string_replace.go | 19 +- vendor/go.opencensus.io/Makefile | 8 +- vendor/go.opencensus.io/opencensus.go | 2 +- 
vendor/go.opencensus.io/trace/doc.go | 13 +- vendor/go.opencensus.io/trace/lrumap.go | 2 +- vendor/go.opencensus.io/trace/trace_go11.go | 1 + .../go.opencensus.io/trace/trace_nongo11.go | 1 + vendor/modules.txt | 68 +- 206 files changed, 11940 insertions(+), 1462 deletions(-) create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/hashicorp/go-plugin/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-plugin/notes_unix.go create mode 100644 vendor/github.com/hashicorp/go-plugin/notes_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/ci/ports.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_darwin.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_freebsd.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_linux.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_windows.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/freeport/freeport.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/tlsutil/config.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/tlsutil/generate.go create mode 100644 vendor/github.com/hashicorp/nomad/testutil/tls.go create mode 100644 vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go rename vendor/github.com/shirou/gopsutil/v3/{host/types.go => internal/common/warnings.go} (59%) create mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go create mode 100644 vendor/github.com/shoenig/test/LICENSE create mode 100644 vendor/github.com/shoenig/test/interfaces/interfaces.go create mode 100644 vendor/github.com/shoenig/test/internal/assertions/assertions.go create mode 100644 vendor/github.com/shoenig/test/internal/brokenfs/fs_default.go create mode 100644 vendor/github.com/shoenig/test/internal/brokenfs/fs_windows.go create mode 100644 
vendor/github.com/shoenig/test/internal/constraints/constraints.go create mode 100644 vendor/github.com/shoenig/test/must/assert.go create mode 100644 vendor/github.com/shoenig/test/must/fs_default.go create mode 100644 vendor/github.com/shoenig/test/must/fs_windows.go create mode 100644 vendor/github.com/shoenig/test/must/invocations.go create mode 100644 vendor/github.com/shoenig/test/must/must.go create mode 100644 vendor/github.com/shoenig/test/must/scripts.go create mode 100644 vendor/github.com/shoenig/test/must/settings.go create mode 100644 vendor/github.com/shoenig/test/portal/portal.go create mode 100644 vendor/github.com/shoenig/test/portal/portal_default.go create mode 100644 vendor/github.com/shoenig/test/portal/portal_windows.go create mode 100644 vendor/github.com/shoenig/test/wait/wait.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go diff --git a/go.mod b/go.mod index de4e4104..58030f81 100644 --- a/go.mod +++ b/go.mod @@ -9,17 +9,18 @@ require ( github.com/google/go-containerregistry v0.5.1 github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62 github.com/hashicorp/go-hclog v1.3.1 - github.com/hashicorp/go-plugin v1.4.3 - github.com/hashicorp/nomad v1.4.4 + github.com/hashicorp/go-plugin v1.4.9 + github.com/hashicorp/nomad v1.4.6 github.com/stretchr/testify v1.8.1 ) require ( + cloud.google.com/go/storage v1.28.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.1 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Microsoft/hcsshim v0.9.6 // indirect github.com/agext/levenshtein v1.2.1 // indirect @@ -37,12 +38,12 @@ require ( github.com/containerd/console v1.0.3 // indirect github.com/containerd/containerd v1.6.18 // indirect github.com/containerd/stargz-snapshotter/estargz v0.4.1 // indirect - github.com/containernetworking/plugins v1.1.1 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/containernetworking/plugins v1.2.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/creack/pty v1.1.18 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/cli v20.10.21+incompatible // indirect + github.com/docker/cli v23.0.1+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker v20.10.24+incompatible // indirect github.com/docker/docker-credential-helpers v0.6.4 // indirect @@ -61,6 +62,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.0.0 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect @@ -78,8 +80,8 @@ require ( github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect - github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1 // indirect - github.com/hashicorp/go-set v0.1.6 // indirect + 
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 // indirect + github.com/hashicorp/go-set v0.1.8 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect @@ -91,9 +93,9 @@ require ( github.com/hashicorp/raft v1.3.11 // indirect github.com/hashicorp/raft-autopilot v0.1.6 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hashicorp/vault/api v1.8.1 // indirect + github.com/hashicorp/vault/api v1.8.2 // indirect github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 // indirect - github.com/hashicorp/vault/sdk v0.6.0 // indirect + github.com/hashicorp/vault/sdk v0.6.1 // indirect github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 // indirect github.com/huandu/xstrings v1.3.2 // indirect @@ -106,7 +108,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/miekg/dns v1.1.50 // indirect - github.com/mitchellh/cli v1.1.4 // indirect + github.com/mitchellh/cli v1.1.5 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b // indirect @@ -121,7 +123,6 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/mrunalp/fileutils v0.5.0 // indirect github.com/oklog/run v1.1.0 // indirect - github.com/onsi/gomega v1.17.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/opencontainers/runc v1.1.5 // indirect @@ -132,25 +133,26 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/posener/complete v1.2.3 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/rogpeppe/go-internal v1.6.1 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect - github.com/shirou/gopsutil/v3 v3.22.8 // indirect + github.com/shirou/gopsutil/v3 v3.23.1 // indirect + github.com/shoenig/test v0.6.1 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/tklauser/go-sysconf v0.3.10 // indirect - github.com/tklauser/numcpus v0.4.0 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zclconf/go-cty v1.11.0 // indirect + github.com/zclconf/go-cty v1.12.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.opencensus.io v0.23.0 // indirect + go.opencensus.io v0.24.0 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220921164117-439092de6870 // indirect @@ -161,6 +163,7 @@ require ( golang.org/x/text v0.13.0 // indirect golang.org/x/time 
v0.0.0-20220224211638-0e9765cccd65 // indirect golang.org/x/tools v0.6.0 // indirect + google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.56.3 // indirect diff --git a/go.sum b/go.sum index a4565658..ac2a4996 100644 --- a/go.sum +++ b/go.sum @@ -26,7 +26,8 @@ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIA cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -58,8 +59,8 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.0 h1:P1ekkbuU73Ui/wS0nK1HOM37hh4xdfZo485UPf8rc+Y= -github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= +github.com/Masterminds/sprig/v3 v3.2.1 h1:n6EPaDyLSvCEa3frruQvAiHuNp2dhBlMSmkEr+HuzGc= +github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -116,7 +117,7 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.44.126 h1:7HQJw2DNiwpxqMe2H7odGNT2rhO4SRrUe5/8dYXl0Jk= +github.com/aws/aws-sdk-go v1.44.184 h1:/MggyE66rOImXJKl1HqhLQITvWvqIV7w1Q4MaG6FHUo= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -263,8 +264,8 @@ github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/plugins 
v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= -github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= +github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= @@ -284,8 +285,9 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pq github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -312,8 +314,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU= -github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= +github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -401,7 +403,6 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= @@ -480,8 +481,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -500,10 +501,10 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -528,8 +529,8 @@ github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62 h1:72 github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62/go.mod h1:oznME/M/L6XDklrE62H9R1Rp+WYtxrISywtwXpA+bgU= github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU= github.com/hashicorp/consul/api v1.15.3/go.mod h1:/g/qgcoBcEXALCNZgRRisyTW0nY86++L0KbeAMXYCeY= -github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -563,8 +564,9 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.9 h1:ESiK220/qE0aGxWdzKIvRH69iLiuN/PjoLTm69RoWtU= +github.com/hashicorp/go-plugin v1.4.9/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= @@ -587,10 +589,11 @@ github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1/go.mod h1:Ch/bf00Qnx77MZ github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1 h1:Yc026VyMyIpq1UWRnakHRG01U8fJm+nEfEmjoAb00n8= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= -github.com/hashicorp/go-set v0.1.6 h1:fj/JG5B97sAOd9OQN4GL880yCE384fz3asNDpGbxkPo= -github.com/hashicorp/go-set v0.1.6/go.mod h1:ELvMcE+1mRHYPVgTFSQiecObIwZRxY5Q11EhbtmM5KQ= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= +github.com/hashicorp/go-set v0.1.8 h1:q2r58lFkJrikmC4I+vS3A+bn6QgR7EYeFD8kRiAIAnk= +github.com/hashicorp/go-set v0.1.8/go.mod h1:wedp+UE6HoxBywExd7mrdGdcXOo3awtiELmnRnpzsKI= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -618,8 +621,8 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad v1.4.4 h1:mfPZ/ZZU+n2zTCM6o9SPn5fJyKzZga+ri2iyLuAY/LM= -github.com/hashicorp/nomad v1.4.4/go.mod h1:NMRKlNmWQhRgBGiUABi+nxry45SDvNYQib3io2RJUL0= +github.com/hashicorp/nomad v1.4.6 h1:28KdRPWy9S9LCEFKpvk53icv4he5d8bRTeRrg9R0IQU= +github.com/hashicorp/nomad v1.4.6/go.mod h1:Bv6eHWRLyqnViX/O6aqbHhtwVA3Rqq2IWSjmyKy3A/g= github.com/hashicorp/nomad/api v0.0.0-20221006174558-2aa7e66bdb52 h1:O7oUpvlilRu3CWMiQtQF/pkF6ZUSWmjD0sn+T8oqewo= github.com/hashicorp/nomad/api v0.0.0-20221006174558-2aa7e66bdb52/go.mod h1:1dS8jZqAXhEreBcb26wpaV4Llk2cLO2sucuDKI+oTIs= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= @@ -632,12 +635,13 @@ github.com/hashicorp/serf v0.9.7/go.mod 
h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= -github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM= +github.com/hashicorp/vault/api v1.8.2/go.mod h1:ML8aYzBIhY5m1MD1B2Q0JV89cC85YVH4t5kBaZiyVaE= github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 h1:HkaCmTKzcgLa2tjdiAid1rbmyQNmQGHfnmvIIM2WorY= github.com/hashicorp/vault/api/auth/kubernetes v0.3.0/go.mod h1:l1B4MGtLc+P37MabBQiIhP3qd9agj0vqhETmaQjjC/Y= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA= +github.com/hashicorp/vault/sdk v0.6.1/go.mod h1:Ck4JuAC6usTphfrrRJCRH+7/N7O2ozZzkm/fzQFt4uM= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= @@ -742,8 +746,8 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/cli v1.1.4 h1:qj8czE26AU4PbiaPXK5uVmMSM+V5BYsFBiM9HhGRLUA= -github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ= +github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -799,7 +803,6 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -814,18 +817,15 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= 
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -923,8 +923,9 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -940,9 +941,10 @@ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y= -github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= -github.com/shoenig/test v0.4.3 h1:3+CjrpqCwtL08S0wZQilu9WWR/S2CdsLKhHjbJqPj/I= +github.com/shirou/gopsutil/v3 v3.23.1 h1:a9KKO+kGLKEvcPIs4W62v0nu3sciVDOOOPUD0Hz7z/4= +github.com/shirou/gopsutil/v3 v3.23.1/go.mod 
h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA= +github.com/shoenig/test v0.6.1 h1:TVIih3yGvaH8Yci2OedB/NAhOC9UlNi5+ajCVyMPflg= +github.com/shoenig/test v0.6.1/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1001,10 +1003,10 @@ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -1046,8 +1048,8 @@ github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPS github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0= -github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= +github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1060,8 +1062,8 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1160,7 +1162,6 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1170,7 +1171,6 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1265,7 +1265,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1282,13 +1281,14 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1356,7 +1356,6 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1379,7 +1378,8 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.100.0 h1:LGUYIrbW9pzYQQ8NWXlaIVkgnfubVBZbMFb9P8TK374= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md index 61d8ebff..fcdd4e88 100644 --- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index cff5af1a..147f756f 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -176,6 +176,11 @@ func (c *Conn) Close() { c.sigconn.Close() } +// Connected returns whether conn is connected +func (c *Conn) Connected() bool { + return c.sysconn.Connected() && c.sigconn.Connected() +} + // NewConnection establishes a connection to a bus using a caller-supplied function. // This allows connecting to remote buses through a user-supplied mechanism. // The supplied function may be called multiple times, and should return independent connections. diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index fa04afc7..074148cb 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -417,6 +417,29 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { return status, nil } +// GetUnitByPID returns the unit object path of the unit a process ID +// belongs to. It takes a UNIX PID and returns the object path. The PID must +// refer to an existing system process +func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) { + var result dbus.ObjectPath + + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result) + + return result, err +} + +// GetUnitNameByPID returns the name of the unit a process ID belongs to. It +// takes a UNIX PID and returns the object path. The PID must refer to an +// existing system process +func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) { + path, err := c.GetUnitByPID(ctx, pid) + if err != nil { + return "", err + } + + return unitName(path), nil +} + // Deprecated: use ListUnitsContext instead. func (c *Conn) ListUnits() ([]UnitStatus, error) { return c.ListUnitsContext(context.Background()) @@ -828,3 +851,14 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { return status, nil } + +// Freeze the cgroup associated with the unit. +// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. +func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() +} + +// Unfreeze the cgroup associated with the unit. +func (c *Conn) ThawUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index 8990f85b..483743c9 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -1,9 +1,10 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/docs/generate-authors.sh`. +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. Aanand Prasad Aaron L. 
Xu -Aaron Lehmann +Aaron Lehmann Aaron.L.Xu Abdur Rehman Abhinandan Prativadi @@ -24,22 +25,27 @@ Akihiro Suda Akim Demaille Alan Thompson Albert Callarisa +Alberto Roura Albin Kerouanton Aleksa Sarai Aleksander Piotrowski Alessandro Boch +Alex Couture-Beil Alex Mavrogiannis Alex Mayer Alexander Boyd Alexander Larsson -Alexander Morozov +Alexander Morozov Alexander Ryabov Alexandre González +Alexey Igrychev +Alexis Couvreur Alfred Landrum Alicia Lauerman Allen Sun Alvin Deng Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> Amir Goldstein Amit Krishnan Amit Shukla @@ -48,6 +54,8 @@ Anca Iordache Anda Xu Andrea Luzzardi Andreas Köhler +Andres G. Aragoneses +Andres Leon Rangel Andrew France Andrew Hsu Andrew Macpherson @@ -67,8 +75,9 @@ Antonis Kalipetis Anusha Ragunathan Ao Li Arash Deshmeh -Arko Dasgupta -Arnaud Porterie +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout Arthur Peka Ashwini Oruganti Azat Khuyiyakhmetov @@ -76,18 +85,23 @@ Bardia Keyoumarsi Barnaby Gray Bastiaan Bakker BastianHofmann +Ben Bodenmiller Ben Bonnefoy Ben Creasy Ben Firshman Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater Benoit Sigoure Bhumika Bayani Bill Wang Bin Liu Bingshen Wang +Bishal Das Boaz Shuster Bogdan Anton Boris Pruessmann +Brad Baker Bradley Cicenas Brandon Mitchell Brandon Philips @@ -96,6 +110,7 @@ Bret Fisher Brian (bex) Exelbierd Brian Goff Brian Wieder +Bruno Sousa Bryan Bess Bryan Boreham Bryan Murphy @@ -114,15 +129,19 @@ Charles Chan Charles Law Charles Smith Charlie Drage +Charlotte Mach ChaYoung You +Chee Hau Lim Chen Chuanliang Chen Hanxiao Chen Mingjie Chen Qiu +Chris Couzens Chris Gavin Chris Gibson Chris McKinnel Chris Snow +Chris Vermilion Chris Weyl Christian Persson Christian Stefanescu @@ -131,6 +150,7 @@ Christophe Vidal Christopher Biscardi Christopher Crone Christopher Jones +Christopher Svensson Christy Norman Chun Chen Clinton Kitson @@ -139,8 +159,10 @@ Colin Hebert Collin Guarino Colm Hally Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby Corey Farrell Corey Quon +Cory Bennet Craig Wilhite Cristian Staretu Daehyeok Mun @@ -170,11 +192,13 @@ Dattatraya Kumbhar Dave Goodchild Dave Henderson Dave Tucker +David Alvarez David Beitey David Calavera David Cramer David Dooling David Gageot +David Karlsson David Lechner David Scott David Sheets @@ -186,7 +210,8 @@ Denis Defreyne Denis Gladkikh Denis Ollier Dennis Docter -Derek McGowan +Derek McGowan +Des Preston Deshi Xiao Dharmit Shah Dhawal Yogesh Bhanushali @@ -196,12 +221,14 @@ Dimitry Andric Ding Fei Diogo Monica Djordje Lukic +Dmitriy Fishman Dmitry Gusev Dmitry Smirnov Dmitry V. Krivenok Dominik Braun Don Kjer Dong Chen +DongGeon Lee Doug Davis Drew Erny Ed Costello @@ -211,12 +238,14 @@ Eli Uriegas Elias Faxö Elliot Luo <956941328@qq.com> Eric Curtin +Eric Engestrom Eric G. Noriega Eric Rosenberg Eric Sage Eric-Olivier Lamey Erica Windisch Erik Hollensbe +Erik Humphrey Erik St. Martin Essam A. Hassan Ethan Haynes @@ -229,8 +258,10 @@ Evelyn Xu Everett Toews Fabio Falci Fabrizio Soppelsa +Felix Geyer Felix Hupfeld Felix Rabe +fezzik1620 Filip Jareš Flavio Crisciani Florian Klein @@ -242,6 +273,7 @@ Frederic Hemberger Frederick F. Kautz IV Frederik Nordahl Jul Sabroe Frieder Bluemle +Gabriel Gore Gabriel Nicolas Avellaneda Gaetan de Villele Gang Qiao @@ -251,13 +283,18 @@ George MacRorie George Xie Gianluca Borello Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov Goksu Toprak Gou Rao +Govind Rai Grant Reaber Greg Pflaum +Gsealy Guilhem Lettron Guillaume J. 
Charmes Guillaume Le Floch +Guillaume Tardif gwx296173 Günther Jungbluth Hakan Özler @@ -278,6 +315,7 @@ Hugo Gabriel Eyherabide huqun Huu Nguyen Hyzhou Zhy +Iain Samuel McLean Elder Ian Campbell Ian Philpot Ignacio Capurro @@ -287,6 +325,7 @@ Ilya Sotkov Ioan Eugen Stan Isabel Jimenez Ivan Grcic +Ivan Grund Ivan Markin Jacob Atzen Jacob Tomlinson @@ -302,15 +341,18 @@ Jan-Jaap Driessen Jana Radhakrishnan Jared Hocutt Jasmine Hegman +Jason Hall Jason Heiss Jason Plum Jay Kamat +Jean Lecordier Jean Rouge Jean-Christophe Sirot Jean-Pierre Huynh Jeff Lindsay Jeff Nickoloff Jeff Silberman +Jennings Zhang Jeremy Chambers Jeremy Unruh Jeremy Yallop @@ -322,6 +364,7 @@ Jian Zhang Jie Luo Jilles Oldenbeuving Jim Galasyn +Jim Lin Jimmy Leger Jimmy Song jimmyxian @@ -338,6 +381,7 @@ Johannes 'fish' Ziemke John Feminella John Harris John Howard +John Howard John Laswell John Maguire John Mulhausen @@ -347,13 +391,16 @@ John Tims John V. Martinez John Willis Jon Johnson +Jon Zeolla Jonatas Baldin Jonathan Boulle Jonathan Lee Jonathan Lomas Jonathan McCrohan +Jonathan Warriss-Simmons Jonh Wendell Jordan Jennings +Jorge Vallecillo Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com> Joseph Kern Josh Bodah @@ -383,9 +430,11 @@ Katie McLaughlin Ke Xu Kei Ohmura Keith Hudgins +Kelton Bassingthwaite Ken Cochrane Ken ICHIKAWA Kenfe-Mickaël Laventure +Kevin Alvarez Kevin Burke Kevin Feyrer Kevin Kern @@ -401,6 +450,7 @@ Krasi Georgiev Kris-Mikael Krister Kun Zhang Kunal Kushwaha +Kyle Mitofsky Lachlan Cooper Lai Jiangshan Lars Kellogg-Stedman @@ -410,6 +460,7 @@ Lee Gaines Lei Jitang Lennie Leo Gallucci +Leonid Skorospelov Lewis Daly Li Yi Li Yi @@ -445,6 +496,7 @@ Manjunath A Kumatagi Mansi Nahar mapk0y Marc Bihlmaier +Marc Cornellà Marco Mariani Marco Vedovati Marcus Martins @@ -459,6 +511,7 @@ Mason Fish Mason Malone Mateusz Major Mathieu Champlon +Mathieu Rollet Matt Gucci Matt Robenolt Matteo Orefice @@ -467,11 +520,13 @@ Matthieu Hauglustaine Mauro Porras P Max Shytikov Maxime Petazzoni +Maximillian Fan Xavier Mei ChunTao +Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith Michael Bridgen -Michael Crosby +Michael Crosby Michael Friis Michael Irwin Michael Käufl @@ -487,6 +542,7 @@ Mihai Borobocea Mihuleacc Sergiu Mike Brown Mike Casas +Mike Dalton Mike Danese Mike Dillon Mike Goelzer @@ -503,9 +559,12 @@ Mohini Anne Dsouza Moorthy RS Morgan Bauer Morten Hekkvang +Morten Linderud Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> Mrunal Patel muicoder +Murukesh Mohanan Muthukumar R Máximo Cuadros Mårten Cassel @@ -521,6 +580,7 @@ Nathan LeClaire Nathan McCauley Neil Peterson Nick Adcock +Nick Santos Nico Stapelbroek Nicola Kabar Nicolas Borboën @@ -535,6 +595,8 @@ Noah Treuhaft O.S. 
Tezer Odin Ugedal ohmystack +OKA Naoya +Oliver Pomeroy Olle Jonsson Olli Janatuinen Oscar Wieman @@ -550,9 +612,12 @@ Paul Lietar Paul Mulders Paul Weaver Pavel Pospisil +Paweł Gronowski +Paweł Pokrywka Paweł Szczekutowicz Peeyush Gupta Per Lundberg +Peter Dave Hello Peter Edge Peter Hsu Peter Jaffe @@ -560,11 +625,13 @@ Peter Kehl Peter Nagy Peter Salvatore Peter Waller -Phil Estes +Phil Estes Philip Alexander Etling Philipp Gillé Philipp Schmied +Phong Tran pidster +Pieter E Smit pixelistik Pratik Karki Prayag Verma @@ -574,6 +641,7 @@ Qiang Huang Qinglan Peng qudongfang Raghavendra K T +Rahul Kadyan Rahul Zoldyck Ravi Shekhar Jethani Ray Tsang @@ -582,6 +650,7 @@ Remy Suen Renaud Gaubert Ricardo N Feliciano Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> Richard Mathie Richard Scothern Rick Wieman @@ -591,6 +660,7 @@ Rob Gulewich Robert Wallis Robin Naundorf Robin Speekenbrink +Roch Feuillade Rodolfo Ortiz Rogelio Canedo Rohan Verma @@ -609,11 +679,13 @@ Sainath Grandhi Sakeven Jiang Sally O'Malley Sam Neirinck +Sam Thibault Samarth Shah Sambuddha Basu Sami Tabet Samuel Cochran Samuel Karp +Sandro Jäckel Santhosh Manohar Sargun Dhillon Saswat Bhattacharya @@ -643,7 +715,8 @@ Slava Semushin Solomon Hykes Song Gao Spencer Brown -squeegels <1674195+squeegels@users.noreply.github.com> +Spring Lee +squeegels Srini Brahmaroutu Stefan S. Stefan Scherer @@ -654,6 +727,7 @@ Stephen Rust Steve Durrheimer Steve Richards Steven Burgess +Stoica-Marcu Floris-Andrei Subhajit Ghosh Sun Jianbo Sune Keller @@ -665,7 +739,10 @@ Sébastien HOUZÉ T K Sourabh TAGOMORI Satoshi taiji-tech +Takeshi Koenuma +Takuya Noguchi Taylor Jones +Teiva Harsanyi Tejaswini Duggaraju Tengfei Wang Teppei Fukuda @@ -696,6 +773,7 @@ Tom Fotherby Tom Klingenberg Tom Milligan Tom X. 
Tobin +Tomas Bäckman Tomas Tomecek Tomasz Kopczynski Tomáš Hrčka @@ -711,6 +789,7 @@ Ulrich Bareth Ulysses Souza Umesh Yadav Valentin Lorentz +Vardan Pogosian Venkateswara Reddy Bukkasamudram Veres Lajos Victor Vieux @@ -757,6 +836,7 @@ Yunxiang Huang Zachary Romero Zander Mackie zebrilee +Zeel B Patel Zhang Kun Zhang Wei Zhang Wentao @@ -768,4 +848,5 @@ Zhu Guihua Álex González Álvaro Lázaro Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> 徐俊杰 diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 31ad117d..b7c05c3f 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -19,7 +19,7 @@ const ( // ConfigFileName is the name of config file ConfigFileName = "config.json" configFileDir = ".docker" - oldConfigfile = ".dockercfg" + oldConfigfile = ".dockercfg" // Deprecated: remove once we stop printing deprecation warning contextsDir = "contexts" ) @@ -84,16 +84,6 @@ func Path(p ...string) (string, error) { return path, nil } -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - // LoadFromReader is a convenience function that creates a ConfigFile object from // a reader func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { @@ -140,12 +130,8 @@ func load(configDir string) (*configfile.ConfigFile, bool, error) { // Can't find latest config file so check for the old one filename = filepath.Join(getHomeDir(), oldConfigfile) - if file, err := os.Open(filename); err == nil { + if _, err := os.Stat(filename); err == nil { printLegacyFileWarning = true - defer file.Close() - if err := configFile.LegacyLoadFromReader(file); err != nil { - return configFile, printLegacyFileWarning, errors.Wrap(err, filename) - } } return configFile, printLegacyFileWarning, nil } @@ -158,7 +144,7 @@ func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) } if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format has been removed and the configuration file will be ignored") } if !configFile.ContainsAuth() { configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index d6f71081..609a88c2 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,9 +3,7 @@ package configfile import ( "encoding/base64" "encoding/json" - "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -16,13 +14,6 @@ import ( "github.com/sirupsen/logrus" ) -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexServer = "https://index.docker.io/v1/" -) - // ConfigFile ~/.docker/config.json file info type ConfigFile struct { AuthConfigs map[string]types.AuthConfig `json:"auths"` @@ -46,8 +37,7 @@ type ConfigFile struct { PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored. CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` @@ -60,11 +50,7 @@ type ProxyConfig struct { HTTPSProxy string `json:"httpsProxy,omitempty"` NoProxy string `json:"noProxy,omitempty"` FTPProxy string `json:"ftpProxy,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` + AllProxy string `json:"allProxy,omitempty"` } // New initializes an empty configuration file for the given filename 'fn' @@ -78,44 +64,6 @@ func New(fn string) *ConfigFile { } } -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexServer - configFile.AuthConfigs[defaultIndexServer] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - // LoadFromReader reads the configuration data given and sets up the auth config // information with given directory and populates the receiver object func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { @@ -134,7 +82,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { ac.ServerAddress = addr configFile.AuthConfigs[addr] = ac } - return checkKubernetesConfiguration(configFile.Kubernetes) + return nil } // ContainsAuth returns whether there is authentication configured @@ -191,10 +139,10 @@ func (configFile *ConfigFile) Save() (retErr error) { } dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return err } - temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename)) if err != nil { return err } @@ -244,6 +192,7 @@ func (configFile *ConfigFile) ParseProxyConfig(host 
string, runOpts map[string]* "HTTPS_PROXY": &config.HTTPSProxy, "NO_PROXY": &config.NoProxy, "FTP_PROXY": &config.FTPProxy, + "ALL_PROXY": &config.AllProxy, } m := runOpts if m == nil { @@ -292,12 +241,11 @@ func decodeAuth(authStr string) (string, string, error) { if n > decLen { return "", "", errors.Errorf("Something went wrong decoding auth config") } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { + userName, password, ok := strings.Cut(string(decoded), ":") + if !ok || userName == "" { return "", "", errors.Errorf("Invalid auth configuration file") } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil + return userName, strings.Trim(password, "\x00"), nil } // GetCredentialsStore returns a new credentials store from the settings in the @@ -352,7 +300,8 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, for registryHostname := range configFile.CredentialHelpers { newAuth, err := configFile.GetAuthConfig(registryHostname) if err != nil { - return nil, err + logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) + continue } auths[registryHostname] = newAuth } @@ -399,17 +348,3 @@ func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) delete(configFile.Plugins, pluginname) } } - -func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { - if kubeConfig == nil { - return nil - } - switch kubeConfig.AllNamespaces { - case "": - case "enabled": - case "disabled": - default: - return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) - } - return nil -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go index 6af67181..35388754 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -12,7 +12,7 @@ import ( // ignoring any error during the process. func copyFilePermissions(src, dst string) { var ( - mode os.FileMode = 0600 + mode os.FileMode = 0o600 uid, gid int ) diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go index e509820b..de1c676e 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -75,7 +75,6 @@ func ConvertToHostname(url string) string { stripped = strings.TrimPrefix(url, "https://") } - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] + hostName, _, _ := strings.Cut(stripped, "/") + return hostName } diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 00000000..32017f8f --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000..087320da --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,669 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. +// +// The primary features of cmp are: +// +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. +// +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. +// +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO(≥go1.18): Use any instead of interface{}. + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. 
+// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added from y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. 
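Equal and Diff are normally paired in tests: Equal gates the failure and Diff renders the report. A minimal sketch, assuming a throwaway Point struct with only exported fields (so no Exporter or Ignore option is needed) and a standard *testing.T:

    package example

    import (
        "testing"

        "github.com/google/go-cmp/cmp"
    )

    type Point struct{ X, Y int }

    func TestPoint(t *testing.T) {
        got := Point{X: 1, Y: 2}
        want := Point{X: 1, Y: 3}
        if !cmp.Equal(want, got) {
            // Diff reports y - x, so "-" lines come from want and "+" lines from got.
            t.Errorf("Point mismatch (-want +got):\n%s", cmp.Diff(want, got))
        }
    }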
+func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = anyType + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + return &pathStep{t, vx, vy} +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.curPtrs.Init() + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case exporter: + s.exporters = append(s.exporters, opt) + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from + // when calling this function to when returning from it. 
+ + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Cycle-detection for slice elements (see NOTE in compareSlice). + t := step.Type() + vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. 
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool + var vax, vay reflect.Value // Addressable versions of vx and vy + + var mayForce, mayForceInit bool + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. 
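The v[:0] versus v[:1] observation above takes only a few lines to reproduce; a standalone sketch in plain Go, independent of this package:

    package main

    import "fmt"

    func main() {
        v := []int{1, 2, 3}
        a, b := v[:0], v[:1]        // both slice headers start at &v[0]
        fmt.Println(len(a), len(b)) // 0 1; clearly different values despite the shared pointer
        fmt.Println(&b[0] == &v[0]) // true; b begins at the same address as v
    }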
+ + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. 
+ const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for pointers. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. 
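The reflect.New plus Set idiom that this helper relies on is easy to see in isolation; a minimal sketch using only the standard reflect package:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        v := reflect.ValueOf(42) // values obtained this way are not addressable
        fmt.Println(v.CanAddr()) // false

        vc := reflect.New(v.Type()).Elem()  // allocate a fresh, addressable int
        vc.Set(v)                           // copy the original value into it
        fmt.Println(vc.CanAddr(), vc.Int()) // true 42
    }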
+func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 00000000..ae851fe5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,16 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +package cmp + +import "reflect" + +const supportExporters = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { + panic("no support for forcibly accessing unexported fields") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000..e2c0f74e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,36 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportExporters = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000..36062a60 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,18 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cmp_debug +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000..a3b97a1a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,123 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cmp_debug +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000..a248e543 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,402 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. 
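Because this is an internal package it cannot be imported from outside the go-cmp module, so the sketch below is written as if it sat in this package's own tests; it shows how Difference, EqualFunc, BoolResult, and the EditScript accessors fit together:

    package diff

    import "testing"

    func TestDifferenceSketch(t *testing.T) {
        x, y := []rune("ABCABBA"), []rune("CBABAC")
        es := Difference(len(x), len(y), func(ix, iy int) Result {
            return BoolResult(x[ix] == y[iy])
        })
        // The exact edit-script is not guaranteed to be stable, but its invariants
        // are: LenX/LenY match the input lengths, and Dist() == 0 iff the lists are equal.
        if es.LenX() != len(x) || es.LenY() != len(y) {
            t.Fatalf("unexpected lengths for %v", es)
        }
        t.Logf("edit-script: %v (distance %d)", es, es.Dist())
    }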
+func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. 
+ // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. + // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Non-deterministically start with either the forward or reverse direction + // to introduce some deliberate instability so that we have the flexibility + // to change this algorithm in the future. + if flags.Deterministic || randBool { + goto forwardSearch + } else { + goto reverseSearch + } + +forwardSearch: + { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + goto finishSearch + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. 
+ z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. + if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + goto reverseSearch + } + +reverseSearch: + { + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + goto finishSearch + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + goto forwardSearch + } + +finishSearch: + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. 
+ for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000..d8e459c9 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000..d127d436 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. 
+func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 00000000..7b498bb2 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,164 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "reflect" + "strconv" +) + +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) 
+ } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000..1a71bfcb --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,34 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000..16e6860a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,37 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !purego +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000..98533b03 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. 
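// Illustrative sketch, separate from the vendored source: the float comparison
// just below orders NaN before every non-NaN value, so NaN map keys are placed
// first rather than at an arbitrary position. The same predicate applied with
// the standard library (values chosen arbitrarily):
package main

import (
	"fmt"
	"math"
	"sort"
)

func main() {
	less := func(fx, fy float64) bool {
		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
	}
	vals := []float64{2, math.NaN(), -1, 0}
	sort.Slice(vals, func(i, j int) bool { return less(vals[i], vals[j]) })
	fmt.Println(vals) // [NaN -1 0 2]
}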
+ fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000..1f9ca9c4 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,554 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. 
+ apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. +type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
+// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. 
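// Illustrative sketch, separate from the vendored source: FilterValues,
// documented above, restricts an option to value pairs that satisfy a
// predicate. Here a Comparer that always reports true is applied only when
// both values are NaN, so NaN compares equal to NaN (similar in spirit to
// cmpopts.EquateNaNs). The test slices are arbitrary.
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	nanEqual := cmp.FilterValues(
		func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
		cmp.Comparer(func(_, _ float64) bool { return true }),
	)
	x := []float64{1, math.NaN(), 3}
	y := []float64{1, math.NaN(), 3}
	fmt.Println(cmp.Equal(x, y))           // false: NaN != NaN by default
	fmt.Println(cmp.Equal(x, y, nanEqual)) // true: the filtered Comparer applies
}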
+ var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. 
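// Illustrative sketch, separate from the vendored source: a Transformer, as
// documented above, converts values before they are compared. The "Sort"
// transformer below compares []int as multisets by transforming each slice
// into a sorted copy; the transformer name and test values are arbitrary.
package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	trans := cmp.Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...) // copy so the input is not mutated
		sort.Ints(out)
		return out
	})
	fmt.Println(cmp.Equal([]int{1, 3, 2}, []int{2, 1, 3}, trans)) // true
	fmt.Println(cmp.Equal([]int{1, 3, 2}, []int{2, 1, 1}, trans)) // false
}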
+// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. +// +// The equality function must be: +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") + } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. 
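// Illustrative sketch, separate from the vendored source: AllowUnexported,
// defined just below, lets Equal look inside the unexported fields of the
// listed struct types. The counter type and its values are hypothetical.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type counter struct{ n int } // hypothetical type with an unexported field

func main() {
	x, y := counter{n: 1}, counter{n: 1}
	// cmp.Equal(x, y) without an option would panic rather than silently
	// ignore the unexported field n.
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(counter{}))) // true
}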
+func AllowUnexported(types ...interface{}) Option { + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return exporter(func(t reflect.Type) bool { return m[t] }) +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Report (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. +func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc + reportByCycle +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. 
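// Illustrative sketch, separate from the vendored source: a minimal Reporter,
// as described in the Reporter documentation above, that records the path and
// values of every unequal leaf. The user struct and its values are made up.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffRecorder implements the interface expected by cmp.Reporter.
type diffRecorder struct {
	path  cmp.Path
	diffs []string
}

func (r *diffRecorder) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffRecorder) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffRecorder) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%v: %+v != %+v", r.path, vx, vy))
	}
}

func main() {
	type user struct {
		Name string
		Age  int
	}
	var rec diffRecorder
	cmp.Equal(user{"Ann", 30}, user{"Ann", 31}, cmp.Reporter(&rec))
	fmt.Println(rec.diffs) // one entry, for the differing Age field
}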
+func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 00000000..a0a58850 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,380 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. 
+// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := value.TypeString(ps.typ, false) + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressable) + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. 
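// Illustrative sketch, separate from the vendored source: PathStep values such
// as StructField can be inspected inside a FilterPath predicate, for example
// to ignore a single field by name. The session type and field names are
// hypothetical.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	type session struct {
		User  string
		Token string
	}
	ignoreToken := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Token"
	}, cmp.Ignore())

	x := session{User: "ann", Token: "abc"}
	y := session{User: "ann", Token: "xyz"}
	fmt.Println(cmp.Equal(x, y))              // false: Token differs
	fmt.Println(cmp.Equal(x, y, ignoreToken)) // true: Token is ignored
}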
+func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int + isSlice bool // False for reflect.Array +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. 
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a separate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + +// isExported reports whether the identifier is exported. 
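// Illustrative sketch, separate from the vendored source: the cycle detection
// described above is what lets Equal terminate on self-referential values and
// require that both sides cycle at the same position. The node type and the
// ring helper are hypothetical.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type node struct {
	Value int
	Next  *node
}

// ring builds a circular singly linked list out of the given values.
func ring(values ...int) *node {
	head := &node{Value: values[0]}
	curr := head
	for _, v := range values[1:] {
		curr.Next = &node{Value: v}
		curr = curr.Next
	}
	curr.Next = head // close the cycle
	return head
}

func main() {
	fmt.Println(cmp.Equal(ring(1, 2, 3), ring(1, 2, 3))) // true
	fmt.Println(cmp.Equal(ring(1, 2, 3), ring(1, 2)))    // false
}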
+func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000..f43cd12e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,54 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000..2050bf6b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,433 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" +) + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
+ TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 6 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else if opts.verbosity() < 3 { + opts = opts.WithVerbosity(3) + } + + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + + // For leaf nodes, format the value based on the reflect.Values alone. + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, parentKind, ptrs) + case diffInserted: + return opts.FormatValue(v.ValueY, parentKind, ptrs) + default: + panic("invalid diff mode") + } + } + + // Register slice element to support cycle detection. 
+ if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) + case reflect.Ptr: + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} + case reflect.Interface: + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + return out + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() + case diffRemoved: + isZero = r.Value.ValueX.IsZero() + case diffInserted: + isZero = r.Value.ValueY.IsZero() + } + if isZero { + continue + } + } + // Elide ignored nodes. 
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value, ptrs); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var numDiffs int + var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) + groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. 
+ for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) + } + default: + out := opts.FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + } + recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 00000000..be31b33a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000..2ab41fad --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,414 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). 
+ // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. + LimitVerbosity bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := value.TypeString(t, opts.QualifiedNames) + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } + + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") + if hasParens || hasBraces { + return s + } + } + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. 
+ defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return opts.formatString("", v.String()) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(value.PointerOf(v), true)) + case reflect.Struct: + var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if vv.IsZero() { + continue // Elide fields with zero values + } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + + // Check whether this is a []byte of text data. + if t.Elem() == byteType { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + skipType = true + return opts.FormatType(t, out) + } + } + + fallthrough + case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for i := 0; i < v.Len(); i++ { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) + list = append(list, textRecord{Value: s}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out + case reflect.Map: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. 
+ ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) + } + defer ptrs.Pop() + + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) + list = append(list, textRecord{Key: sk, Value: sv}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out + case reflect.Ptr: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} + } + defer ptrs.Pop() + + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. 
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { + var opts formatOptions + opts.DiffMode = diffIdentical + opts.TypeMode = elideType + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + opts.VerbosityLevel = maxVerbosityPreset + opts.LimitVerbosity = true + s := opts.FormatValue(v, reflect.Map, ptrs).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. 
+func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000..23e444f6 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,614 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false + } + + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. 
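	// Editorial aside, not part of the vendored source: the Comparer(bytes.Equal)
	// workaround mentioned in the comment above is go-cmp's public API and looks
	// roughly like this (the byte-slice values are hypothetical):
	//
	//	opt := cmp.Comparer(bytes.Equal)
	//	if diff := cmp.Diff([]byte("hello, world"), []byte("hello, cmp!"), opt); diff != "" {
	//		fmt.Printf("payload mismatch (-x +y):\n%s", diff)
	//	}
	//
	// Equality is then decided by bytes.Equal, while slices that differ still get
	// the batched diff produced by FormatDiffSlice below.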
+ const minLength = 32 + return vx.Len() >= minLength && vy.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } + + // Auto-detect the type of the data. + var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isString = true + case t.Kind() == reflect.Slice && t.Elem() == byteType: + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isString = true + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int + for i, r := range sx + sy { + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 + } + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isPureLinedText: + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // - A line starts with `"""` + // - A line starts with "..." 
+ // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace + // + // For example: + // + // """ + // ... // 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isMostlyText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if !isMostlyText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != bytesType { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + len0 := len(list) + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) + } + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { + groups = append(groups, diffStats{Name: name}) + prevMode = mode + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats('=').NumIdentical++ + case diff.UniqueX: + lastStats('!').NumRemoved++ + case diff.UniqueY: + lastStats('!').NumInserted++ + case diff.Modified: + lastStats('!').NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. 
+// +// Example: +// +// WindowSize: 16, +// Input: [ +// {NumIdentical: 61}, // group 0 +// {NumRemoved: 3, NumInserted: 1}, // group 1 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 9}, // └── coalesce +// {NumIdentical: 64}, // group 2 +// {NumRemoved: 3, NumInserted: 1}, // group 3 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 7}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 2}, // └── coalesce +// {NumIdentical: 63}, // group 4 +// ] +// Output: [ +// {NumIdentical: 61}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 64}, +// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 63}, +// ] +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} + +// cleanupSurroundingIdentical scans through all unequal groups, and +// moves any leading sequence of equal elements to the preceding equal group and +// moves and trailing sequence of equal elements to the succeeding equal group. +// +// This is necessary since coalesceInterveningIdentical may coalesce edit groups +// together such that leading/trailing spans of equal elements becomes possible. +// Note that this can occur even with an optimal diffing algorithm. +// +// Example: +// +// Input: [ +// {NumIdentical: 61}, +// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements +// {NumIdentical: 67}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements +// {NumIdentical: 54}, +// ] +// Output: [ +// {NumIdentical: 64}, // incremented by 3 +// {NumRemoved: 9}, +// {NumIdentical: 67}, +// {NumRemoved: 9}, +// {NumIdentical: 64}, // incremented by 10 +// ] +func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { + var ix, iy int // indexes into sequence x and y + for i, ds := range groups { + // Handle equal group. + if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { + numLeadingIdentical++ + } + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. 
+ if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000..388fcf57 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,432 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +const maxColumnLength = 80 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. 
+ Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting +} + +func (s *textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s *textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. 
+func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := !ds.IsZero() + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, + ) + + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.ElideComma { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). 
+ switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000..668d470f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. 
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md new file mode 100644 index 00000000..4c2c0e64 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md @@ -0,0 +1,41 @@ +## v1.4.9 + +ENHANCEMENTS: + +* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. 
[[GH-238](https://github.com/hashicorp/go-plugin/pull/238)] + +## v1.4.8 + +BUG FIXES: + +* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)] + +## v1.4.7 + +ENHANCEMENTS: + +* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)] + +## v1.4.6 + +BUG FIXES: + +* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)] + +## v1.4.5 + +ENHANCEMENTS: + +* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)] + + +## v1.4.4 + +ENHANCEMENTS: + +* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)] + +BUG FIXES: + +* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)] +* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)] diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE index 82b4de97..042324fb 100644 --- a/vendor/github.com/hashicorp/go-plugin/LICENSE +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2016 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 46ee09fc..39391f24 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -3,8 +3,9 @@ `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and -[Vault](https://www.vaultproject.io). +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), and +[Boundary](https://www.boundaryproject.io). While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. Plugins over a real network are not supported diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 67dca883..a6a9ffa2 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -26,6 +29,14 @@ import ( "google.golang.org/grpc" ) +const unrecognizedRemotePluginMessage = `Unrecognized remote plugin message: %s +This usually means + the plugin was not compiled for this architecture, + the plugin is missing dynamic-link libraries necessary to run, + the plugin is not executable by this process due to file permissions, or + the plugin failed to negotiate the initial go-plugin protocol handshake +%s` + // If this is 1, then we've called CleanupClients. This can be used // by plugin RPC implementations to change error behavior since you // can expected network connection errors at this point. 
This should be @@ -473,7 +484,17 @@ func (c *Client) Kill() { c.l.Unlock() } -// Starts the underlying subprocess, communicating with it to negotiate +// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format +// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports. +var peTypes = map[uint16]string{ + 0x14c: "386", + 0x1c0: "arm", + 0x6264: "loong64", + 0x8664: "amd64", + 0xaa64: "arm64", +} + +// Start the underlying subprocess, communicating with it to negotiate // a port for RPC connections, and returning the address to connect via RPC. // // This method is safe to call multiple times. Subsequent calls have no effect. @@ -574,6 +595,8 @@ func (c *Client) Start() (addr net.Addr, err error) { c.config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, ServerName: "localhost", } } @@ -629,17 +652,19 @@ func (c *Client) Start() (addr net.Addr, err error) { // Wait for the command to end. err := cmd.Wait() - debugMsgArgs := []interface{}{ + msgArgs := []interface{}{ "path", path, "pid", pid, } if err != nil { - debugMsgArgs = append(debugMsgArgs, + msgArgs = append(msgArgs, []interface{}{"error", err.Error()}...) + c.logger.Error("plugin process exited", msgArgs...) + } else { + // Log and make sure to flush the logs right away + c.logger.Info("plugin process exited", msgArgs...) } - // Log and make sure to flush the logs write away - c.logger.Debug("plugin process exited", debugMsgArgs...) os.Stderr.Sync() // Set that we exited, which takes a lock @@ -691,10 +716,7 @@ func (c *Client) Start() (addr net.Addr, err error) { line = strings.TrimSpace(line) parts := strings.SplitN(line, "|", 6) if len(parts) < 4 { - err = fmt.Errorf( - "Unrecognized remote plugin message: %s\n\n"+ - "This usually means that the plugin is either invalid or simply\n"+ - "needs to be recompiled to support the latest protocol.", line) + err = fmt.Errorf(unrecognizedRemotePluginMessage, line, additionalNotesAboutCommand(cmd.Path)) return } @@ -774,7 +796,7 @@ func (c *Client) Start() (addr net.Addr, err error) { } // loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA for the client TLSConfig. +// server, and load it as the RootCA and ClientCA for the client TLSConfig. func (c *Client) loadServerCert(cert string) error { certPool := x509.NewCertPool() @@ -791,6 +813,7 @@ func (c *Client) loadServerCert(cert string) error { certPool.AddCert(x509Cert) c.config.TLSConfig.RootCAs = certPool + c.config.TLSConfig.ClientCAs = certPool return nil } diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go index d22c566e..c5b96242 100644 --- a/vendor/github.com/hashicorp/go-plugin/discover.go +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go index 22a7baa6..e62a2191 100644 --- a/vendor/github.com/hashicorp/go-plugin/error.go +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package plugin // This is a type that wraps error types so that they can be messaged diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go index daf142d1..9bf56776 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 842903c9..b0592cb5 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go index 1a8a8e70..2085356c 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go index 387628bf..7203a2cf 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -107,14 +110,26 @@ func (s *GRPCServer) Init() error { return nil } -// Stop calls Stop on the underlying grpc.Server +// Stop calls Stop on the underlying grpc.Server and Close on the underlying +// grpc.Broker if present. func (s *GRPCServer) Stop() { s.server.Stop() + + if s.broker != nil { + s.broker.Close() + s.broker = nil + } } -// GracefulStop calls GracefulStop on the underlying grpc.Server +// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on +// the underlying grpc.Broker if present. func (s *GRPCServer) GracefulStop() { s.server.GracefulStop() + + if s.broker != nil { + s.broker.Close() + s.broker = nil + } } // Config is the GRPCServerConfig encoded as JSON then base64. diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go index a5821815..ae06c116 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go index fb9d4152..a3b5fb12 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + //go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. 
package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto index aa3df463..038423de 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package plugin; option go_package = "plugin"; diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto index 345d0a1c..3157eb88 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package plugin; option go_package = "plugin"; diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto index ce1a1223..1c0d1d05 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package plugin; option go_package = "plugin"; diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go index fb2ef930..ab963d56 100644 --- a/vendor/github.com/hashicorp/go-plugin/log_entry.go +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go index 88955245..09ecafaf 100644 --- a/vendor/github.com/hashicorp/go-plugin/mtls.go +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go index 01c45ad7..4eb1208f 100644 --- a/vendor/github.com/hashicorp/go-plugin/mux_broker.go +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/notes_unix.go new file mode 100644 index 00000000..9a0e5c50 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/notes_unix.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package plugin + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "os/user" + "runtime" + "strconv" + "syscall" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. 
+func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + statT, ok := stat.Sys().(*syscall.Stat_t) + if ok { + currentUsername := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil { + currentUsername = u.Username + } + currentGroup := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil { + currentGroup = g.Name + } + username := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil { + username = u.Username + } + group := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil { + group = g.Name + } + notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername) + notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup) + } + + if elfFile, err := elf.Open(path); err == nil { + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/notes_windows.go new file mode 100644 index 00000000..15680850 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/notes_windows.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package plugin + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "runtime" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. +func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + + if elfFile, err := elf.Open(path); err == nil { + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go index 79d96746..184749b9 100644 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // The plugin package exposes functions and helpers for communicating to // plugins which are implemented as standalone binary applications. // diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go index 88c999a5..68b028c6 100644 --- a/vendor/github.com/hashicorp/go-plugin/process.go +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go index 70ba546b..b73a3607 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_posix.go +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -1,3 +1,7 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows // +build !windows package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go index 0eaa7705..ffa9b9e0 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_windows.go +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go index 0cfc19e5..e4b7be38 100644 --- a/vendor/github.com/hashicorp/go-plugin/protocol.go +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go index f30a4b1d..142454df 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 5bb18dd5..cec0a3d9 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -42,10 +45,16 @@ func (s *RPCServer) Config() string { return "" } // ServerProtocol impl. func (s *RPCServer) Serve(lis net.Listener) { + defer s.done() + for { conn, err := lis.Accept() if err != nil { - log.Printf("[ERR] plugin: plugin server: %s", err) + severity := "ERR" + if errors.Is(err, net.ErrClosed) { + severity = "DEBUG" + } + log.Printf("[%s] plugin: plugin server: %s", severity, err) return } @@ -78,7 +87,7 @@ func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { // Connect the stdstreams (in, out, err) stdstream := make([]net.Conn, 2) - for i, _ := range stdstream { + for i := range stdstream { stdstream[i], err = mux.Accept() if err != nil { mux.Close() @@ -129,13 +138,15 @@ type controlServer struct { // Ping can be called to verify the connection (and likely the binary) // is still alive to a plugin. 
func (c *controlServer) Ping( - null bool, response *struct{}) error { + null bool, response *struct{}, +) error { *response = struct{}{} return nil } func (c *controlServer) Quit( - null bool, response *struct{}) error { + null bool, response *struct{}, +) error { // End the server c.server.done() @@ -152,7 +163,8 @@ type dispenseServer struct { } func (d *dispenseServer) Dispense( - name string, response *uint32) error { + name string, response *uint32, +) error { // Find the function to create this implementation p, ok := d.plugins[name] if !ok { diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 7a58cc39..3f4a017d 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -304,13 +307,13 @@ func Serve(opts *ServeConfig) { certPEM, keyPEM, err := generateCert() if err != nil { - logger.Error("failed to generate client certificate", "error", err) + logger.Error("failed to generate server certificate", "error", err) panic(err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - logger.Error("failed to parse client certificate", "error", err) + logger.Error("failed to parse server certificate", "error", err) panic(err) } @@ -319,6 +322,8 @@ func Serve(opts *ServeConfig) { ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: clientCertPool, MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", } // We send back the raw leaf cert data for the client rather than the diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go index 033079ea..6b14b0c2 100644 --- a/vendor/github.com/hashicorp/go-plugin/server_mux.go +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go index 1d547aaa..a2348642 100644 --- a/vendor/github.com/hashicorp/go-plugin/stream.go +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index e36f2eb2..ffe6fa46 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/tlsutil/tlsutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/tlsutil/tlsutil.go index 3cf9a2ec..165c427b 100644 --- a/vendor/github.com/hashicorp/go-secure-stdlib/tlsutil/tlsutil.go +++ b/vendor/github.com/hashicorp/go-secure-stdlib/tlsutil/tlsutil.go @@ -25,31 +25,33 @@ var TLSLookup = map[string]uint16{ // cipherMap maps the cipher suite names to the internal cipher suite code. 
var cipherMap = map[string]uint16{ - "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, - "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, - "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + 
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, } // ParseCiphers parse ciphersuites from the comma-separated string into recognized slice diff --git a/vendor/github.com/hashicorp/go-set/LICENSE b/vendor/github.com/hashicorp/go-set/LICENSE index a612ad98..1786756f 100644 --- a/vendor/github.com/hashicorp/go-set/LICENSE +++ b/vendor/github.com/hashicorp/go-set/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2022 HashiCorp, Inc. + Mozilla Public License Version 2.0 ================================== diff --git a/vendor/github.com/hashicorp/go-set/README.md b/vendor/github.com/hashicorp/go-set/README.md index 3e212e4f..76d2ae09 100644 --- a/vendor/github.com/hashicorp/go-set/README.md +++ b/vendor/github.com/hashicorp/go-set/README.md @@ -33,7 +33,7 @@ for k := range m { The same result, but in one line using package `go-set`. ```go -list := set.From[string](items).List() +list := set.From[string](items).Slice() ``` # Hash Function @@ -58,6 +58,7 @@ Implements the following set operations - ContainsAll - Subset - Size +- Empty - Union - Difference - Intersect @@ -66,7 +67,7 @@ Provides helper methods - Equal - Copy -- List +- Slice - String # Install diff --git a/vendor/github.com/hashicorp/go-set/hashset.go b/vendor/github.com/hashicorp/go-set/hashset.go index af59be58..7ec79205 100644 --- a/vendor/github.com/hashicorp/go-set/hashset.go +++ b/vendor/github.com/hashicorp/go-set/hashset.go @@ -173,6 +173,11 @@ func (s *HashSet[T, H]) Size() int { return len(s.items) } +// Empty returns true if s contains no elements, false otherwise. +func (s *HashSet[T, H]) Empty() bool { + return s.Size() == 0 +} + // Union returns a set that contains all elements of s and o combined. func (s *HashSet[T, H]) Union(o *HashSet[T, H]) *HashSet[T, H] { result := NewHashSet[T, H](s.Size()) @@ -220,8 +225,8 @@ func (s *HashSet[T, H]) Copy() *HashSet[T, H] { return result } -// List creates a copy of s as a slice. -func (s *HashSet[T, H]) List() []T { +// Slice creates a copy of s as a slice. +func (s *HashSet[T, H]) Slice() []T { result := make([]T, 0, s.Size()) for _, item := range s.items { result = append(result, item) @@ -229,9 +234,25 @@ func (s *HashSet[T, H]) List() []T { return result } -// String creates a string representation of s, using f to transform each element +// List creates a copy of s as a slice. +// +// Deprecated: use Slice() instead. +func (s *HashSet[T, H]) List() []T { + return s.Slice() +} + +// String creates a string representation of s, using "%v" printf formatting to transform +// each element into a string. The result contains elements sorted by their lexical +// string order. +func (s *HashSet[T, H]) String() string { + return s.StringFunc(func(element T) string { + return fmt.Sprintf("%v", element) + }) +} + +// StringFunc creates a string representation of s, using f to transform each element // into a string. The result contains elements sorted by their string order. 
-func (s *HashSet[T, H]) String(f func(element T) string) string { +func (s *HashSet[T, H]) StringFunc(f func(element T) string) string { l := make([]string, 0, s.Size()) for _, item := range s.items { l = append(l, f(item)) diff --git a/vendor/github.com/hashicorp/go-set/set.go b/vendor/github.com/hashicorp/go-set/set.go index 9a4987a1..090d1c17 100644 --- a/vendor/github.com/hashicorp/go-set/set.go +++ b/vendor/github.com/hashicorp/go-set/set.go @@ -181,6 +181,11 @@ func (s *Set[T]) Size() int { return len(s.items) } +// Empty returns true if s contains no elements, false otherwise. +func (s *Set[T]) Empty() bool { + return s.Size() == 0 +} + // Union returns a set that contains all elements of s and o combined. func (s *Set[T]) Union(o *Set[T]) *Set[T] { result := New[T](s.Size()) @@ -228,8 +233,8 @@ func (s *Set[T]) Copy() *Set[T] { return result } -// List creates a copy of s as a slice. -func (s *Set[T]) List() []T { +// Slice creates a copy of s as a slice. Elements are in no particular order. +func (s *Set[T]) Slice() []T { result := make([]T, 0, s.Size()) for item := range s.items { result = append(result, item) @@ -237,9 +242,25 @@ func (s *Set[T]) List() []T { return result } -// String creates a string representation of s, using f to transform each element -// into a string. The result contains elements sorted by their string order. -func (s *Set[T]) String(f func(element T) string) string { +// List creates a copy of s as a slice. +// +// Deprecated: use Slice() instead. +func (s *Set[T]) List() []T { + return s.Slice() +} + +// String creates a string representation of s, using "%v" printf formating to transform +// each element into a string. The result contains elements sorted by their lexical +// string order. +func (s *Set[T]) String() string { + return s.StringFunc(func(element T) string { + return fmt.Sprintf("%v", element) + }) +} + +// StringFunc creates a string representation of s, using f to transform each element +// into a string. The result contains elements sorted by their lexical string order. 
+func (s *Set[T]) StringFunc(f func(element T) string) string { l := make([]string, 0, s.Size()) for item := range s.items { l = append(l, f(item)) diff --git a/vendor/github.com/hashicorp/nomad/acl/acl.go b/vendor/github.com/hashicorp/nomad/acl/acl.go index 5c1c3322..02a4da0c 100644 --- a/vendor/github.com/hashicorp/nomad/acl/acl.go +++ b/vendor/github.com/hashicorp/nomad/acl/acl.go @@ -358,13 +358,13 @@ func (a *ACL) AllowHostVolume(ns string) bool { return !capabilities.Check(PolicyDeny) } -func (a *ACL) AllowVariableOperation(ns, path, op string) bool { +func (a *ACL) AllowVariableOperation(ns, path, op string, claim *ACLClaim) bool { if a.management { return true } // Check for a matching capability set - capabilities, ok := a.matchingVariablesCapabilitySet(ns, path) + capabilities, ok := a.matchingVariablesCapabilitySet(ns, path, claim) if !ok { return false } @@ -372,6 +372,13 @@ func (a *ACL) AllowVariableOperation(ns, path, op string) bool { return capabilities.Check(op) } +type ACLClaim struct { + Namespace string + Job string + Group string + Task string +} + // AllowVariableSearch is a very loose check that the token has *any* access to // a variables path for the namespace, with an expectation that the actual // search result will be filtered by specific paths @@ -460,17 +467,31 @@ func (a *ACL) matchingHostVolumeCapabilitySet(name string) (capabilitySet, bool) return a.findClosestMatchingGlob(a.wildcardHostVolumes, name) } -// matchingVariablesCapabilitySet looks for a capabilitySet that matches the namespace and path, -// if no concrete definitions are found, then we return the closest matching -// glob. +var workloadVariablesCapabilitySet = capabilitySet{"read": struct{}{}, "list": struct{}{}} + +// matchingVariablesCapabilitySet looks for a capabilitySet in the following order: +// - matching the namespace and path from a policy +// - automatic access based on the claim +// - closest matching glob +// // The closest matching glob is the one that has the smallest character // difference between the namespace and the glob. -func (a *ACL) matchingVariablesCapabilitySet(ns, path string) (capabilitySet, bool) { +func (a *ACL) matchingVariablesCapabilitySet(ns, path string, claim *ACLClaim) (capabilitySet, bool) { // Check for a concrete matching capability set raw, ok := a.variables.Get([]byte(ns + "\x00" + path)) if ok { return raw.(capabilitySet), true } + if claim != nil && ns == claim.Namespace { + switch path { + case "nomad/jobs", + fmt.Sprintf("nomad/jobs/%s", claim.Job), + fmt.Sprintf("nomad/jobs/%s/%s", claim.Job, claim.Group), + fmt.Sprintf("nomad/jobs/%s/%s/%s", claim.Job, claim.Group, claim.Task): + return workloadVariablesCapabilitySet, true + default: + } + } // We didn't find a concrete match, so lets try and evaluate globs. return a.findClosestMatchingGlob(a.wildcardVariables, ns+"\x00"+path) diff --git a/vendor/github.com/hashicorp/nomad/acl/policy.go b/vendor/github.com/hashicorp/nomad/acl/policy.go index b0658cb7..39d886bb 100644 --- a/vendor/github.com/hashicorp/nomad/acl/policy.go +++ b/vendor/github.com/hashicorp/nomad/acl/policy.go @@ -8,7 +8,7 @@ import ( ) const ( - // The following levels are the only valid values for the `policy = "read"` stanza. + // The following levels are the only valid values for the `policy = "read"` block. // When policies are merged together, the most privilege is granted, except for deny // which always takes precedence and supersedes. 
PolicyDeny = "deny" @@ -20,7 +20,7 @@ const ( const ( // The following are the fine-grained capabilities that can be granted within a namespace. - // The Policy stanza is a short hand for granting several of these. When capabilities are + // The Policy block is a short hand for granting several of these. When capabilities are // combined we take the union of all capabilities. If the deny capability is present, it // takes precedence and overwrites all other capabilities. @@ -54,7 +54,7 @@ var ( const ( // The following are the fine-grained capabilities that can be granted for a volume set. - // The Policy stanza is a short hand for granting several of these. When capabilities are + // The Policy block is a short hand for granting several of these. When capabilities are // combined we take the union of all capabilities. If the deny capability is present, it // takes precedence and overwrites all other capabilities. diff --git a/vendor/github.com/hashicorp/nomad/ci/ports.go b/vendor/github.com/hashicorp/nomad/ci/ports.go new file mode 100644 index 00000000..d22f9b1f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/ci/ports.go @@ -0,0 +1,20 @@ +package ci + +import ( + "fmt" + + "github.com/shoenig/test/portal" +) + +type fatalTester struct{} + +func (t *fatalTester) Fatalf(msg string, args ...any) { + panic(fmt.Sprintf(msg, args...)) +} + +// PortAllocator is used to acquire unused ports for testing real network +// listeners. +var PortAllocator = portal.New( + new(fatalTester), + portal.WithAddress("127.0.0.1"), +) diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go index da05aacb..f0c79225 100644 --- a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "os" "path/filepath" @@ -357,12 +356,16 @@ func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { } p := filepath.Join(d.AllocDir, path) - finfos, err := ioutil.ReadDir(p) + finfos, err := os.ReadDir(p) if err != nil { return []*cstructs.AllocFileInfo{}, err } files := make([]*cstructs.AllocFileInfo, len(finfos)) - for idx, info := range finfos { + for idx, file := range finfos { + info, err := file.Info() + if err != nil { + return []*cstructs.AllocFileInfo{}, err + } files[idx] = &cstructs.AllocFileInfo{ Name: info.Name(), IsDir: info.IsDir(), diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go index d516c313..187b8200 100644 --- a/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go @@ -2,7 +2,6 @@ package allocdir import ( "fmt" - "io/ioutil" "os" "path/filepath" @@ -184,12 +183,16 @@ func (t *TaskDir) embedDirs(entries map[string]string) error { } // Enumerate the files in source. 
- dirEntries, err := ioutil.ReadDir(source) + dirEntries, err := os.ReadDir(source) if err != nil { return fmt.Errorf("Couldn't read directory %v: %v", source, err) } - for _, entry := range dirEntries { + for _, fileEntry := range dirEntries { + entry, err := fileEntry.Info() + if err != nil { + return fmt.Errorf("Couldn't read the file information %v: %v", entry, err) + } hostEntry := filepath.Join(source, entry.Name()) taskEntry := filepath.Join(destDir, filepath.Base(hostEntry)) if entry.IsDir() { diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/testing.go b/vendor/github.com/hashicorp/nomad/client/allocdir/testing.go index 6ea7d7bb..c534a99d 100644 --- a/vendor/github.com/hashicorp/nomad/client/allocdir/testing.go +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/testing.go @@ -1,7 +1,6 @@ package allocdir import ( - "io/ioutil" "os" hclog "github.com/hashicorp/go-hclog" @@ -11,7 +10,7 @@ import ( // TestAllocDir returns a built alloc dir in a temporary directory and cleanup // func. func TestAllocDir(t testing.T, l hclog.Logger, prefix, id string) (*AllocDir, func()) { - dir, err := ioutil.TempDir("", prefix) + dir, err := os.MkdirTemp("", prefix) if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) } diff --git a/vendor/github.com/hashicorp/nomad/client/config/testing.go b/vendor/github.com/hashicorp/nomad/client/config/testing.go index 51150504..f06fbe74 100644 --- a/vendor/github.com/hashicorp/nomad/client/config/testing.go +++ b/vendor/github.com/hashicorp/nomad/client/config/testing.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" "path/filepath" "time" @@ -31,7 +30,7 @@ func TestClientConfig(t testing.T) (*Config, func()) { tmpDir = filepath.Clean(tmpDir) // Create a tempdir to hold state and alloc subdirs - parent, err := ioutil.TempDir(tmpDir, "nomadtest") + parent, err := os.MkdirTemp(tmpDir, "nomadtest") if err != nil { t.Fatalf("error creating client dir: %v", err) } diff --git a/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go b/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go index 3059c1a2..4515c64f 100644 --- a/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go +++ b/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go @@ -269,12 +269,14 @@ func (d *dynamicRegistry) DeregisterPlugin(ptype, name, allocID string) error { } } - broadcaster := d.broadcasterForPluginType(ptype) - event := &PluginUpdateEvent{ - EventType: EventTypeDeregistered, - Info: info, + if info != nil { + broadcaster := d.broadcasterForPluginType(ptype) + event := &PluginUpdateEvent{ + EventType: EventTypeDeregistered, + Info: info, + } + broadcaster.broadcast(event) } - broadcaster.broadcast(event) return d.sync() } diff --git a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go index 178b65b7..84ce6b40 100644 --- a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go +++ b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go @@ -18,7 +18,19 @@ import ( // cgroups.v1 // // This is a read-only value. 
-var UseV2 = cgroups.IsCgroup2UnifiedMode() +var UseV2 = safelyDetectUnifiedMode() + +// Currently it is possible for the runc utility function to panic +// https://github.com/opencontainers/runc/pull/3745 +func safelyDetectUnifiedMode() (result bool) { + defer func() { + if r := recover(); r != nil { + result = false + } + }() + result = cgroups.IsCgroup2UnifiedMode() + return +} // GetCgroupParent returns the mount point under the root cgroup in which Nomad // will create cgroups. If parent is not set, an appropriate name for the version diff --git a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go index bdf3b347..f0ba00ad 100644 --- a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go +++ b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go @@ -5,7 +5,6 @@ package cgutil import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -224,7 +223,7 @@ func (c *cpusetManagerV1) reconcileCpusets() { } // look for reserved cpusets which we don't know about and remove - files, err := ioutil.ReadDir(c.reservedCpusetPath()) + files, err := os.ReadDir(c.reservedCpusetPath()) if err != nil { c.logger.Error("failed to list files in reserved cgroup path during reconciliation", "path", c.reservedCpusetPath(), "error", err) } diff --git a/vendor/github.com/hashicorp/nomad/client/logmon/logging/rotator.go b/vendor/github.com/hashicorp/nomad/client/logmon/logging/rotator.go index 7762f081..a23d6172 100644 --- a/vendor/github.com/hashicorp/nomad/client/logmon/logging/rotator.go +++ b/vendor/github.com/hashicorp/nomad/client/logmon/logging/rotator.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -189,7 +188,7 @@ func (f *FileRotator) nextFile() error { // lastFile finds out the rotated file with the largest index in a path. 
func (f *FileRotator) lastFile() error { - finfos, err := ioutil.ReadDir(f.path) + finfos, err := os.ReadDir(f.path) if err != nil { return err } @@ -275,7 +274,7 @@ func (f *FileRotator) purgeOldFiles() { select { case <-f.purgeCh: var fIndexes []int - files, err := ioutil.ReadDir(f.path) + files, err := os.ReadDir(f.path) if err != nil { f.logger.Error("error getting directory listing", "error", err) return diff --git a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go index a5b2f51c..178b0fe1 100644 --- a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go +++ b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go @@ -149,7 +149,7 @@ func (c *csiManager) resyncPluginsFromRegistry(ptype string) { // handlePluginEvent syncs a single event against the plugin registry func (c *csiManager) handlePluginEvent(event *dynamicplugins.PluginUpdateEvent) { - if event == nil { + if event == nil || event.Info == nil { return } c.logger.Trace("dynamic plugin event", diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go index caf82384..ab9c35fb 100644 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go +++ b/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go @@ -163,7 +163,14 @@ func (c *checker) checkHTTP(ctx context.Context, qc *QueryContext, q *Query) *st qr.Status = structs.CheckFailure return qr } - request.Header = q.Headers + for header, values := range q.Headers { + for _, value := range values { + request.Header.Add(header, value) + } + } + + request.Host = request.Header.Get("Host") + request.Body = io.NopCloser(strings.NewReader(q.Body)) request = request.WithContext(ctx) diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go index 45746f33..e04aa459 100644 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go +++ b/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go @@ -71,34 +71,32 @@ func (a *AllocRegistration) Copy() *AllocRegistration { return c } -// NumServices returns the number of registered services. +// NumServices returns the number of registered task AND group services. +// Group services are prefixed with "group-". func (a *AllocRegistration) NumServices() int { if a == nil { return 0 } total := 0 - for _, treg := range a.Tasks { - for _, sreg := range treg.Services { - if sreg.Service != nil { - total++ - } - } + for _, task := range a.Tasks { + total += len(task.Services) } return total } -// NumChecks returns the number of registered checks. +// NumChecks returns the number of registered checks from both task AND group +// services. Group services are prefixed with "group-". 
func (a *AllocRegistration) NumChecks() int { if a == nil { return 0 } total := 0 - for _, treg := range a.Tasks { - for _, sreg := range treg.Services { - total += len(sreg.Checks) + for _, task := range a.Tasks { + for _, service := range task.Services { + total += len(service.Checks) } } diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go index 7123b7e4..f752cd22 100644 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go +++ b/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go @@ -20,13 +20,13 @@ type WorkloadServices struct { ProviderNamespace string // Restarter allows restarting the task or task group depending on the - // check_restart stanzas. + // check_restart blocks. Restarter WorkloadRestarter // Services and checks to register for the task. Services []*structs.Service - // Networks from the task's resources stanza. + // Networks from the task's resources block. // TODO: remove and use Ports Networks structs.Networks diff --git a/vendor/github.com/hashicorp/nomad/client/taskenv/services.go b/vendor/github.com/hashicorp/nomad/client/taskenv/services.go index 08753944..4c110133 100644 --- a/vendor/github.com/hashicorp/nomad/client/taskenv/services.go +++ b/vendor/github.com/hashicorp/nomad/client/taskenv/services.go @@ -15,39 +15,49 @@ func InterpolateServices(taskEnv *TaskEnv, services []*structs.Service) []*struc interpolated := make([]*structs.Service, len(services)) - for i, origService := range services { - // Create a copy as we need to re-interpolate every time the - // environment changes. - service := origService.Copy() - - for _, check := range service.Checks { - check.Name = taskEnv.ReplaceEnv(check.Name) - check.Type = taskEnv.ReplaceEnv(check.Type) - check.Command = taskEnv.ReplaceEnv(check.Command) - check.Args = taskEnv.ParseAndReplace(check.Args) - check.Path = taskEnv.ReplaceEnv(check.Path) - check.Protocol = taskEnv.ReplaceEnv(check.Protocol) - check.PortLabel = taskEnv.ReplaceEnv(check.PortLabel) - check.InitialStatus = taskEnv.ReplaceEnv(check.InitialStatus) - check.Method = taskEnv.ReplaceEnv(check.Method) - check.GRPCService = taskEnv.ReplaceEnv(check.GRPCService) - check.Header = interpolateMapStringSliceString(taskEnv, check.Header) - } + for i, service := range services { + interpolated[i] = InterpolateService(taskEnv, service) + } - service.Name = taskEnv.ReplaceEnv(service.Name) - service.PortLabel = taskEnv.ReplaceEnv(service.PortLabel) - service.Address = taskEnv.ReplaceEnv(service.Address) - service.Tags = taskEnv.ParseAndReplace(service.Tags) - service.CanaryTags = taskEnv.ParseAndReplace(service.CanaryTags) - service.Meta = interpolateMapStringString(taskEnv, service.Meta) - service.CanaryMeta = interpolateMapStringString(taskEnv, service.CanaryMeta) - service.TaggedAddresses = interpolateMapStringString(taskEnv, service.TaggedAddresses) - interpolateConnect(taskEnv, service.Connect) + return interpolated +} - interpolated[i] = service +func InterpolateService(taskEnv *TaskEnv, origService *structs.Service) *structs.Service { + // Guard against not having a valid taskEnv. This can be the case if the + // PreKilling or Exited hook is run before Poststart. + if taskEnv == nil || origService == nil { + return nil } - return interpolated + // Create a copy as we need to re-interpolate every time the + // environment changes. 
+ service := origService.Copy() + + for _, check := range service.Checks { + check.Name = taskEnv.ReplaceEnv(check.Name) + check.Type = taskEnv.ReplaceEnv(check.Type) + check.Command = taskEnv.ReplaceEnv(check.Command) + check.Args = taskEnv.ParseAndReplace(check.Args) + check.Path = taskEnv.ReplaceEnv(check.Path) + check.Protocol = taskEnv.ReplaceEnv(check.Protocol) + check.PortLabel = taskEnv.ReplaceEnv(check.PortLabel) + check.InitialStatus = taskEnv.ReplaceEnv(check.InitialStatus) + check.Method = taskEnv.ReplaceEnv(check.Method) + check.GRPCService = taskEnv.ReplaceEnv(check.GRPCService) + check.Header = interpolateMapStringSliceString(taskEnv, check.Header) + } + + service.Name = taskEnv.ReplaceEnv(service.Name) + service.PortLabel = taskEnv.ReplaceEnv(service.PortLabel) + service.Address = taskEnv.ReplaceEnv(service.Address) + service.Tags = taskEnv.ParseAndReplace(service.Tags) + service.CanaryTags = taskEnv.ParseAndReplace(service.CanaryTags) + service.Meta = interpolateMapStringString(taskEnv, service.Meta) + service.CanaryMeta = interpolateMapStringString(taskEnv, service.CanaryMeta) + service.TaggedAddresses = interpolateMapStringString(taskEnv, service.TaggedAddresses) + interpolateConnect(taskEnv, service.Connect) + + return service } func interpolateMapStringSliceString(taskEnv *TaskEnv, orig map[string][]string) map[string][]string { diff --git a/vendor/github.com/hashicorp/nomad/command/agent/host/host.go b/vendor/github.com/hashicorp/nomad/command/agent/host/host.go index 9119f7b9..08aeb53b 100644 --- a/vendor/github.com/hashicorp/nomad/command/agent/host/host.go +++ b/vendor/github.com/hashicorp/nomad/command/agent/host/host.go @@ -1,7 +1,7 @@ package host import ( - "io/ioutil" + "io" "os" "strings" ) @@ -119,7 +119,7 @@ func slurp(path string) string { return err.Error() } - bs, err := ioutil.ReadAll(fh) + bs, err := io.ReadAll(fh) if err != nil { return err.Error() } diff --git a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_darwin.go b/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_darwin.go deleted file mode 100644 index 48277420..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_darwin.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build darwin -// +build darwin - -package freeport - -import ( - "fmt" - "os/exec" - "regexp" - "strconv" -) - -/* -$ sysctl net.inet.ip.portrange.first net.inet.ip.portrange.last -net.inet.ip.portrange.first: 49152 -net.inet.ip.portrange.last: 65535 -*/ - -const ( - ephPortFirst = "net.inet.ip.portrange.first" - ephPortLast = "net.inet.ip.portrange.last" - command = "sysctl" -) - -var ephPortRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`) - -func getEphemeralPortRange() (int, int, error) { - cmd := exec.Command(command, "-n", ephPortFirst, ephPortLast) - out, err := cmd.Output() - if err != nil { - return 0, 0, err - } - - val := string(out) - - m := ephPortRe.FindStringSubmatch(val) - if m != nil { - min, err1 := strconv.Atoi(m[1]) - max, err2 := strconv.Atoi(m[2]) - - if err1 == nil && err2 == nil { - return min, max, nil - } - } - - return 0, 0, fmt.Errorf("unexpected sysctl value %q for keys %q %q", val, ephPortFirst, ephPortLast) -} diff --git a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_freebsd.go b/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_freebsd.go deleted file mode 100644 index 64b420eb..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_freebsd.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build freebsd -// +build freebsd - 
-package freeport - -import ( - "fmt" - "os/exec" - "regexp" - "strconv" -) - -/* -$ sysctl net.inet.ip.portrange.first net.inet.ip.portrange.last -net.inet.ip.portrange.first: 49152 -net.inet.ip.portrange.last: 65535 -*/ - -const ( - ephPortFirst = "net.inet.ip.portrange.first" - ephPortLast = "net.inet.ip.portrange.last" - command = "sysctl" -) - -var ephPortRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`) - -func getEphemeralPortRange() (int, int, error) { - cmd := exec.Command(command, "-n", ephPortFirst, ephPortLast) - out, err := cmd.Output() - if err != nil { - return 0, 0, err - } - - val := string(out) - - m := ephPortRe.FindStringSubmatch(val) - if m != nil { - min, err1 := strconv.Atoi(m[1]) - max, err2 := strconv.Atoi(m[2]) - - if err1 == nil && err2 == nil { - return min, max, nil - } - } - - return 0, 0, fmt.Errorf("unexpected sysctl value %q for keys %q %q", val, ephPortFirst, ephPortLast) -} diff --git a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_linux.go b/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_linux.go deleted file mode 100644 index 4e6de69e..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build linux -// +build linux - -package freeport - -import ( - "fmt" - "os/exec" - "regexp" - "strconv" -) - -/* -$ sysctl -n net.ipv4.ip_local_port_range -32768 60999 -*/ - -const ephemeralPortRangeSysctlKey = "net.ipv4.ip_local_port_range" - -var ephemeralPortRangePatt = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`) - -func getEphemeralPortRange() (int, int, error) { - cmd := exec.Command("sysctl", "-n", ephemeralPortRangeSysctlKey) - out, err := cmd.Output() - if err != nil { - return 0, 0, err - } - - val := string(out) - - m := ephemeralPortRangePatt.FindStringSubmatch(val) - if m != nil { - min, err1 := strconv.Atoi(m[1]) - max, err2 := strconv.Atoi(m[2]) - - if err1 == nil && err2 == nil { - return min, max, nil - } - } - - return 0, 0, fmt.Errorf("unexpected sysctl value %q for key %q", val, ephemeralPortRangeSysctlKey) -} diff --git a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_windows.go b/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_windows.go deleted file mode 100644 index c4cd40df..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/freeport/ephemeral_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build windows -// +build windows - -package freeport - -// For now we hard-code the Windows ephemeral port range, which is documented by -// Microsoft to be in this range for Vista / Server 2008 and newer. -// -// https://support.microsoft.com/en-us/help/832017/service-overview-and-network-port-requirements-for-windows -func getEphemeralPortRange() (int, int, error) { - return 49152, 65535, nil -} diff --git a/vendor/github.com/hashicorp/nomad/helper/freeport/freeport.go b/vendor/github.com/hashicorp/nomad/helper/freeport/freeport.go deleted file mode 100644 index 7f4d268c..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/freeport/freeport.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copied from github.com/hashicorp/consul/sdk/freeport -// -// and tweaked for use by Nomad. 
-package freeport - -import ( - "container/list" - "fmt" - "math/rand" - "net" - "os" - "runtime" - "sync" - "time" -) - -// todo(shoenig) -// There is a conflict between this copy of the updated sdk/freeport package -// and the lib/freeport package that is vendored as of nomad v0.10.x, which -// means we need to be careful to avoid the ports that transitive dependency -// is going to use (i.e. 10,000+). For now, we use the 9XXX port range with -// small blocks which means some tests will have to wait, and we need to be -// very careful not to leak ports. - -const ( - // blockSize is the size of the allocated port block. ports are given out - // consecutively from that block and after that point in a LRU fashion. - // blockSize = 1500 - blockSize = 100 // todo(shoenig) revert once consul dependency is updated - - // maxBlocks is the number of available port blocks before exclusions. - // maxBlocks = 30 - maxBlocks = 10 // todo(shoenig) revert once consul dependency is updated - - // lowPort is the lowest port number that should be used. - // lowPort = 10000 - lowPort = 9000 // todo(shoenig) revert once consul dependency is updated - - // attempts is how often we try to allocate a port block - // before giving up. - attempts = 10 -) - -var ( - // effectiveMaxBlocks is the number of available port blocks. - // lowPort + effectiveMaxBlocks * blockSize must be less than 65535. - effectiveMaxBlocks int - - // firstPort is the first port of the allocated block. - firstPort int - - // lockLn is the system-wide mutex for the port block. - lockLn net.Listener - - // mu guards: - // - pendingPorts - // - freePorts - // - total - mu sync.Mutex - - // once is used to do the initialization on the first call to retrieve free - // ports - once sync.Once - - // condNotEmpty is a condition variable to wait for freePorts to be not - // empty. Linked to 'mu' - condNotEmpty *sync.Cond - - // freePorts is a FIFO of all currently free ports. Take from the front, - // and return to the back. - freePorts *list.List - - // pendingPorts is a FIFO of recently freed ports that have not yet passed - // the not-in-use check. - pendingPorts *list.List - - // total is the total number of available ports in the block for use. - total int -) - -// initialize is used to initialize freeport. 
-func initialize() { - var err error - effectiveMaxBlocks, err = adjustMaxBlocks() - if err != nil { - panic("freeport: ephemeral port range detection failed: " + err.Error()) - } - if effectiveMaxBlocks < 0 { - panic("freeport: no blocks of ports available outside of ephemeral range") - } - if lowPort+effectiveMaxBlocks*blockSize > 65535 { - panic("freeport: block size too big or too many blocks requested") - } - - rand.Seed(time.Now().UnixNano()) - firstPort, lockLn = alloc() - - condNotEmpty = sync.NewCond(&mu) - freePorts = list.New() - pendingPorts = list.New() - - // fill with all available free ports - for port := firstPort + 1; port < firstPort+blockSize; port++ { - if used := isPortInUse(port); !used { - freePorts.PushBack(port) - } - } - total = freePorts.Len() - - go checkFreedPorts() -} - -func checkFreedPorts() { - ticker := time.NewTicker(250 * time.Millisecond) - for { - <-ticker.C - checkFreedPortsOnce() - } -} - -func checkFreedPortsOnce() { - mu.Lock() - defer mu.Unlock() - - pending := pendingPorts.Len() - remove := make([]*list.Element, 0, pending) - for elem := pendingPorts.Front(); elem != nil; elem = elem.Next() { - port := elem.Value.(int) - if used := isPortInUse(port); !used { - freePorts.PushBack(port) - remove = append(remove, elem) - } - } - - retained := pending - len(remove) - - if retained > 0 { - logf("WARN", "%d out of %d pending ports are still in use; something probably didn't wait around for the port to be closed!", retained, pending) - } - - if len(remove) == 0 { - return - } - - for _, elem := range remove { - pendingPorts.Remove(elem) - } - - condNotEmpty.Broadcast() -} - -// adjustMaxBlocks avoids having the allocation ranges overlap the ephemeral -// port range. -func adjustMaxBlocks() (int, error) { - ephemeralPortMin, ephemeralPortMax, err := getEphemeralPortRange() - if err != nil { - return 0, err - } - - if ephemeralPortMin <= 0 || ephemeralPortMax <= 0 { - logf("INFO", "ephemeral port range detection not configured for GOOS=%q", runtime.GOOS) - return maxBlocks, nil - } - - logf("INFO", "detected ephemeral port range of [%d, %d]", ephemeralPortMin, ephemeralPortMax) - for block := 0; block < maxBlocks; block++ { - min := lowPort + block*blockSize - max := min + blockSize - overlap := intervalOverlap(min, max-1, ephemeralPortMin, ephemeralPortMax) - if overlap { - logf("INFO", "reducing max blocks from %d to %d to avoid the ephemeral port range", maxBlocks, block) - return block, nil - } - } - return maxBlocks, nil -} - -// alloc reserves a port block for exclusive use for the lifetime of the -// application. lockLn serves as a system-wide mutex for the port block and is -// implemented as a TCP listener which is bound to the firstPort and which will -// be automatically released when the application terminates. -func alloc() (int, net.Listener) { - for i := 0; i < attempts; i++ { - block := int(rand.Int31n(int32(effectiveMaxBlocks))) - firstPort := lowPort + block*blockSize - ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort)) - if err != nil { - continue - } - // logf("DEBUG", "allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1) - return firstPort, ln - } - panic("freeport: cannot allocate port block") -} - -// MustTake is the same as Take except it panics on error. -func MustTake(n int) (ports []int) { - ports, err := Take(n) - if err != nil { - panic(err) - } - return ports -} - -// Take returns a list of free ports from the allocated port block. It is safe -// to call this method concurrently. 
Ports have been tested to be available on -// 127.0.0.1 TCP but there is no guarantee that they will remain free in the -// future. -func Take(n int) (ports []int, err error) { - if n <= 0 { - return nil, fmt.Errorf("freeport: cannot take %d ports", n) - } - - mu.Lock() - defer mu.Unlock() - - // Reserve a port block - once.Do(initialize) - - if n > total { - return nil, fmt.Errorf("freeport: block size too small") - } - - for len(ports) < n { - for freePorts.Len() == 0 { - if total == 0 { - return nil, fmt.Errorf("freeport: impossible to satisfy request; there are no actual free ports in the block anymore") - } - condNotEmpty.Wait() - } - - elem := freePorts.Front() - freePorts.Remove(elem) - port := elem.Value.(int) - - if used := isPortInUse(port); used { - // Something outside of the test suite has stolen this port, possibly - // due to assignment to an ephemeral port, remove it completely. - logf("WARN", "leaked port %d due to theft; removing from circulation", port) - total-- - continue - } - - ports = append(ports, port) - } - - // logf("DEBUG", "free ports: %v", ports) - return ports, nil -} - -// Return returns a block of ports back to the general pool. These ports should -// have been returned from a call to Take(). -func Return(ports []int) { - if len(ports) == 0 { - return // convenience short circuit for test ergonomics - } - - mu.Lock() - defer mu.Unlock() - - for _, port := range ports { - if port > firstPort && port < firstPort+blockSize { - pendingPorts.PushBack(port) - } - } -} - -func isPortInUse(port int) bool { - ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port)) - if err != nil { - return true - } - _ = ln.Close() - return false -} - -func tcpAddr(ip string, port int) *net.TCPAddr { - return &net.TCPAddr{IP: net.ParseIP(ip), Port: port} -} - -// intervalOverlap returns true if the doubly-inclusive integer intervals -// represented by [min1, max1] and [min2, max2] overlap. -func intervalOverlap(min1, max1, min2, max2 int) bool { - if min1 > max1 { - logf("WARN", "interval1 is not ordered [%d, %d]", min1, max1) - return false - } - if min2 > max2 { - logf("WARN", "interval2 is not ordered [%d, %d]", min2, max2) - return false - } - return min1 <= max2 && min2 <= max1 -} - -func logf(severity string, format string, a ...interface{}) { - _, _ = fmt.Fprintf(os.Stderr, "["+severity+"] freeport: "+format+"\n", a...) 
-} diff --git a/vendor/github.com/hashicorp/nomad/helper/tlsutil/config.go b/vendor/github.com/hashicorp/nomad/helper/tlsutil/config.go new file mode 100644 index 00000000..bac61a4b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/tlsutil/config.go @@ -0,0 +1,498 @@ +package tlsutil + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "os" + "strings" + "time" + + "github.com/hashicorp/nomad/nomad/structs/config" +) + +// supportedTLSVersions are the current TLS versions that Nomad supports +var supportedTLSVersions = map[string]uint16{ + "tls10": tls.VersionTLS10, + "tls11": tls.VersionTLS11, + "tls12": tls.VersionTLS12, +} + +// supportedTLSCiphers are the complete list of TLS ciphers supported by Nomad +var supportedTLSCiphers = map[string]uint16{ + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, +} + +// signatureAlgorithm is the string representation of a signing algorithm +type signatureAlgorithm string + +const ( + rsaStringRepr signatureAlgorithm = "RSA" + ecdsaStringRepr signatureAlgorithm = "ECDSA" +) + +// supportedCipherSignatures is the supported cipher suites with their +// corresponding signature algorithm +var supportedCipherSignatures = map[string]signatureAlgorithm{ + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": rsaStringRepr, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": ecdsaStringRepr, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": rsaStringRepr, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": ecdsaStringRepr, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": rsaStringRepr, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": ecdsaStringRepr, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": rsaStringRepr, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": rsaStringRepr, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": ecdsaStringRepr, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": ecdsaStringRepr, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": rsaStringRepr, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": ecdsaStringRepr, + "TLS_RSA_WITH_AES_128_GCM_SHA256": rsaStringRepr, + "TLS_RSA_WITH_AES_256_GCM_SHA384": rsaStringRepr, + "TLS_RSA_WITH_AES_128_CBC_SHA256": rsaStringRepr, + "TLS_RSA_WITH_AES_128_CBC_SHA": rsaStringRepr, + "TLS_RSA_WITH_AES_256_CBC_SHA": rsaStringRepr, +} 
+ +// defaultTLSCiphers are the TLS Ciphers that are supported by default +var defaultTLSCiphers = []string{ + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +} + +// RegionSpecificWrapper is used to invoke a static Region and turns a +// RegionWrapper into a Wrapper type. +func RegionSpecificWrapper(region string, tlsWrap RegionWrapper) Wrapper { + if tlsWrap == nil { + return nil + } + return func(conn net.Conn) (net.Conn, error) { + return tlsWrap(region, conn) + } +} + +// RegionWrapper is a function that is used to wrap a non-TLS connection and +// returns an appropriate TLS connection or error. This takes a Region as an +// argument. +type RegionWrapper func(region string, conn net.Conn) (net.Conn, error) + +// Wrapper wraps a connection and enables TLS on it. +type Wrapper func(conn net.Conn) (net.Conn, error) + +// Config used to create tls.Config +type Config struct { + // VerifyIncoming is used to verify the authenticity of incoming connections. + // This means that TCP requests are forbidden, only allowing for TLS. TLS connections + // must match a provided certificate authority. This can be used to force client auth. + VerifyIncoming bool + + // VerifyOutgoing is used to verify the authenticity of outgoing connections. + // This means that TLS requests are used, and TCP requests are not made. TLS connections + // must match a provided certificate authority. This is used to verify authenticity of + // server nodes. + VerifyOutgoing bool + + // VerifyServerHostname is used to enable hostname verification of servers. This + // ensures that the certificate presented is valid for server... + // This prevents a compromised client from being restarted as a server, and then + // intercepting request traffic as well as being added as a raft peer. This should be + // enabled by default with VerifyOutgoing, but for legacy reasons we cannot break + // existing clients. + VerifyServerHostname bool + + // CAFile is a path to a certificate authority file. This is used with VerifyIncoming + // or VerifyOutgoing to verify the TLS connection. + CAFile string + + // CertFile is used to provide a TLS certificate that is used for serving TLS connections. + // Must be provided to serve TLS connections. + CertFile string + + // KeyFile is used to provide a TLS key that is used for serving TLS connections. + // Must be provided to serve TLS connections. + KeyFile string + + // KeyLoader dynamically reloads TLS configuration. + KeyLoader *config.KeyLoader + + // CipherSuites have a default safe configuration, or operators can override + // these values for acceptable safe alternatives. + CipherSuites []uint16 + + // PreferServerCipherSuites controls whether the server selects the + // client's most preferred ciphersuite, or the server's most preferred + // ciphersuite. If true then the server's preference, as expressed in + // the order of elements in CipherSuites, is used. + PreferServerCipherSuites bool + + // MinVersion contains the minimum SSL/TLS version that is accepted. 
+ MinVersion uint16 +} + +func NewTLSConfiguration(newConf *config.TLSConfig, verifyIncoming, verifyOutgoing bool) (*Config, error) { + ciphers, err := ParseCiphers(newConf) + if err != nil { + return nil, err + } + + minVersion, err := ParseMinVersion(newConf.TLSMinVersion) + if err != nil { + return nil, err + } + + return &Config{ + VerifyIncoming: verifyIncoming, + VerifyOutgoing: verifyOutgoing, + VerifyServerHostname: newConf.VerifyServerHostname, + CAFile: newConf.CAFile, + CertFile: newConf.CertFile, + KeyFile: newConf.KeyFile, + KeyLoader: newConf.GetKeyLoader(), + CipherSuites: ciphers, + MinVersion: minVersion, + PreferServerCipherSuites: newConf.TLSPreferServerCipherSuites, + }, nil +} + +// AppendCA opens and parses the CA file and adds the certificates to +// the provided CertPool. +func (c *Config) AppendCA(pool *x509.CertPool) error { + if c.CAFile == "" { + return nil + } + + // Read the file + data, err := os.ReadFile(c.CAFile) + if err != nil { + return fmt.Errorf("Failed to read CA file: %v", err) + } + + // Read certificates and return an error if no valid certificates were + // found. Unfortunately it is very difficult to return meaningful + // errors as PEM files are extremely permissive. + if !pool.AppendCertsFromPEM(data) { + return fmt.Errorf("Failed to parse any valid certificates in CA file: %s", c.CAFile) + } + + return nil +} + +// LoadKeyPair is used to open and parse a certificate and key file +func (c *Config) LoadKeyPair() (*tls.Certificate, error) { + if c.CertFile == "" || c.KeyFile == "" { + return nil, nil + } + + if c.KeyLoader == nil { + return nil, fmt.Errorf("No Keyloader object to perform LoadKeyPair") + } + + cert, err := c.KeyLoader.LoadKeyPair(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("Failed to load cert/key pair: %v", err) + } + return cert, err +} + +// OutgoingTLSConfig generates a TLS configuration for outgoing +// requests. It will return a nil config if this configuration should +// not use TLS for outgoing connections. Provides a callback to +// fetch certificates, allowing for reloading on the fly. +func (c *Config) OutgoingTLSConfig() (*tls.Config, error) { + // If VerifyServerHostname is true, that implies VerifyOutgoing + if c.VerifyServerHostname { + c.VerifyOutgoing = true + } + if !c.VerifyOutgoing { + return nil, nil + } + // Create the tlsConfig + tlsConfig := &tls.Config{ + RootCAs: x509.NewCertPool(), + InsecureSkipVerify: true, + CipherSuites: c.CipherSuites, + MinVersion: c.MinVersion, + PreferServerCipherSuites: c.PreferServerCipherSuites, + } + if c.VerifyServerHostname { + tlsConfig.InsecureSkipVerify = false + } + + // Ensure we have a CA if VerifyOutgoing is set + if c.VerifyOutgoing && c.CAFile == "" { + return nil, fmt.Errorf("VerifyOutgoing set, and no CA certificate provided!") + } + + // Parse the CA cert if any + err := c.AppendCA(tlsConfig.RootCAs) + if err != nil { + return nil, err + } + + cert, err := c.LoadKeyPair() + if err != nil { + return nil, err + } else if cert != nil { + tlsConfig.GetCertificate = c.KeyLoader.GetOutgoingCertificate + tlsConfig.GetClientCertificate = c.KeyLoader.GetClientCertificate + } + + return tlsConfig, nil +} + +// OutgoingTLSWrapper returns a a Wrapper based on the OutgoingTLS +// configuration. If hostname verification is on, the wrapper +// will properly generate the dynamic server name for verification. 
+func (c *Config) OutgoingTLSWrapper() (RegionWrapper, error) { + // Get the TLS config + tlsConfig, err := c.OutgoingTLSConfig() + if err != nil { + return nil, err + } + + // Check if TLS is not enabled + if tlsConfig == nil { + return nil, nil + } + + // Generate the wrapper based on hostname verification + if c.VerifyServerHostname { + wrapper := func(region string, conn net.Conn) (net.Conn, error) { + conf := tlsConfig.Clone() + conf.ServerName = "server." + region + ".nomad" + return WrapTLSClient(conn, conf) + } + return wrapper, nil + } else { + wrapper := func(dc string, c net.Conn) (net.Conn, error) { + return WrapTLSClient(c, tlsConfig) + } + return wrapper, nil + } + +} + +// WrapTLSClient wraps a net.Conn into a client tls connection, performing any +// additional verification as needed. +// +// As of go 1.3, crypto/tls only supports either doing no certificate +// verification, or doing full verification including of the peer's +// DNS name. For consul, we want to validate that the certificate is +// signed by a known CA, but because consul doesn't use DNS names for +// node names, we don't verify the certificate DNS names. Since go 1.3 +// no longer supports this mode of operation, we have to do it +// manually. +func WrapTLSClient(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) { + tlsConn := tls.Client(conn, tlsConfig) + + // If crypto/tls is doing verification, there's no need to do + // our own. + if !tlsConfig.InsecureSkipVerify { + return tlsConn, nil + } + + if err := tlsConn.Handshake(); err != nil { + tlsConn.Close() + return nil, err + } + + // The following is lightly-modified from the doFullHandshake + // method in crypto/tls's handshake_client.go. + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: "", + Intermediates: x509.NewCertPool(), + } + + certs := tlsConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + _, err := certs[0].Verify(opts) + if err != nil { + tlsConn.Close() + return nil, err + } + + return tlsConn, nil +} + +// IncomingTLSConfig generates a TLS configuration for incoming requests +func (c *Config) IncomingTLSConfig() (*tls.Config, error) { + // Create the tlsConfig + tlsConfig := &tls.Config{ + ClientCAs: x509.NewCertPool(), + ClientAuth: tls.NoClientCert, + CipherSuites: c.CipherSuites, + MinVersion: c.MinVersion, + PreferServerCipherSuites: c.PreferServerCipherSuites, + } + + // Parse the CA cert if any + err := c.AppendCA(tlsConfig.ClientCAs) + if err != nil { + return nil, err + } + + // Add cert/key + cert, err := c.LoadKeyPair() + if err != nil { + return nil, err + } else if cert != nil { + tlsConfig.GetCertificate = c.KeyLoader.GetOutgoingCertificate + } + + // Check if we require verification + if c.VerifyIncoming { + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if c.CAFile == "" { + return nil, fmt.Errorf("VerifyIncoming set, and no CA certificate provided!") + } + if cert == nil { + return nil, fmt.Errorf("VerifyIncoming set, and no Cert/Key pair provided!") + } + } + + return tlsConfig, nil +} + +// ParseCiphers parses ciphersuites from the comma-separated string into +// recognized slice +func ParseCiphers(tlsConfig *config.TLSConfig) ([]uint16, error) { + suites := []uint16{} + + cipherStr := strings.TrimSpace(tlsConfig.TLSCipherSuites) + + var parsedCiphers []string + if cipherStr == "" { + parsedCiphers = defaultTLSCiphers + + } else { + parsedCiphers = 
strings.Split(tlsConfig.TLSCipherSuites, ",") + } + for _, cipher := range parsedCiphers { + c, ok := supportedTLSCiphers[cipher] + if !ok { + return suites, fmt.Errorf("unsupported TLS cipher %q", cipher) + } + suites = append(suites, c) + } + + // Ensure that the specified cipher suite list is supported by the TLS + // Certificate signature algorithm. This is a check for user error, where a + // TLS certificate could support RSA but a user has configured a cipher suite + // list of ciphers where only ECDSA is supported. + keyLoader := tlsConfig.GetKeyLoader() + + // Ensure that the keypair has been loaded before continuing + keyLoader.LoadKeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + + if keyLoader.GetCertificate() != nil { + supportedSignatureAlgorithm, err := getSignatureAlgorithm(keyLoader.GetCertificate()) + if err != nil { + return []uint16{}, err + } + + for _, cipher := range parsedCiphers { + if supportedCipherSignatures[cipher] == supportedSignatureAlgorithm { + // Positive case, return the matched cipher suites as the signature + // algorithm is also supported + return suites, nil + } + } + + // Negative case, if this is reached it means that none of the specified + // cipher suites signature algorithms match the signature algorithm + // for the certificate. + return []uint16{}, fmt.Errorf("Specified cipher suites don't support the certificate signature algorithm %s, consider adding more cipher suites to match this signature algorithm.", supportedSignatureAlgorithm) + } + + // Default in case this function is called but TLS is not actually configured + // This is only reached if the TLS certificate is nil + return []uint16{}, nil +} + +// getSignatureAlgorithm returns the signature algorithm for a TLS certificate +// This is determined by examining the type of the certificate's public key, +// as Golang doesn't expose a more straightforward API which returns this +// type +func getSignatureAlgorithm(tlsCert *tls.Certificate) (signatureAlgorithm, error) { + privKey := tlsCert.PrivateKey + switch privKey.(type) { + case *rsa.PrivateKey: + return rsaStringRepr, nil + case *ecdsa.PrivateKey: + return ecdsaStringRepr, nil + default: + return "", fmt.Errorf("Unsupported signature algorithm %T; RSA and ECDSA only are supported.", privKey) + } +} + +// ParseMinVersion parses the specified minimum TLS version for the Nomad agent +func ParseMinVersion(version string) (uint16, error) { + if version == "" { + return supportedTLSVersions["tls12"], nil + } + + vers, ok := supportedTLSVersions[version] + if !ok { + return 0, fmt.Errorf("unsupported TLS version %q", version) + } + + return vers, nil +} + +// ShouldReloadRPCConnections compares two TLS Configurations and determines +// whether they differ such that RPC connections should be reloaded +func ShouldReloadRPCConnections(old, new *config.TLSConfig) (bool, error) { + var certificateInfoEqual bool + var rpcInfoEqual bool + + // If already configured with TLS, compare with the new TLS configuration + if new != nil { + var err error + certificateInfoEqual, err = new.CertificateInfoIsEqual(old) + if err != nil { + return false, err + } + } else if new == nil && old == nil { + certificateInfoEqual = true + } + + if new != nil && old != nil && new.EnableRPC == old.EnableRPC { + rpcInfoEqual = true + } + + return (!rpcInfoEqual || !certificateInfoEqual), nil +} diff --git a/vendor/github.com/hashicorp/nomad/helper/tlsutil/generate.go b/vendor/github.com/hashicorp/nomad/helper/tlsutil/generate.go new file mode 100644 index 
00000000..cf584cf1 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/tlsutil/generate.go @@ -0,0 +1,313 @@ +package tlsutil + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "time" +) + +// GenerateSerialNumber returns random bigint generated with crypto/rand +func GenerateSerialNumber() (*big.Int, error) { + l := new(big.Int).Lsh(big.NewInt(1), 128) + s, err := rand.Int(rand.Reader, l) + if err != nil { + return nil, err + } + return s, nil +} + +// GeneratePrivateKey generates a new ecdsa private key +func GeneratePrivateKey() (crypto.Signer, string, error) { + curve := elliptic.P256() + + pk, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, "", fmt.Errorf("error generating ECDSA private key: %s", err) + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + return nil, "", fmt.Errorf("error marshaling ECDSA private key: %s", err) + } + + pemBlock, err := pemEncodeKey(bs, "EC PRIVATE KEY") + if err != nil { + return nil, "", err + } + + return pk, pemBlock, nil +} + +func pemEncodeKey(key []byte, blockType string) (string, error) { + var buf bytes.Buffer + + if err := pem.Encode(&buf, &pem.Block{Type: blockType, Bytes: key}); err != nil { + return "", fmt.Errorf("error encoding private key: %s", err) + } + return buf.String(), nil +} + +type CAOpts struct { + Signer crypto.Signer + Serial *big.Int + Days int + PermittedDNSDomains []string + Domain string + Name string +} + +type CertOpts struct { + Signer crypto.Signer + CA string + Serial *big.Int + Name string + Days int + DNSNames []string + IPAddresses []net.IP + ExtKeyUsage []x509.ExtKeyUsage +} + +// GenerateCA generates a new CA for agent TLS (not to be confused with Connect TLS) +func GenerateCA(opts CAOpts) (string, string, error) { + signer := opts.Signer + var pk string + if signer == nil { + var err error + signer, pk, err = GeneratePrivateKey() + if err != nil { + return "", "", err + } + } + + id, err := keyID(signer.Public()) + if err != nil { + return "", "", err + } + + sn := opts.Serial + if sn == nil { + var err error + sn, err = GenerateSerialNumber() + if err != nil { + return "", "", err + } + } + name := opts.Name + if name == "" { + name = fmt.Sprintf("Nomad Agent CA %d", sn) + } + + days := opts.Days + if opts.Days == 0 { + days = 365 + } + + // Create the CA cert + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{ + Country: []string{"US"}, + PostalCode: []string{"94105"}, + Province: []string{"CA"}, + Locality: []string{"San Francisco"}, + StreetAddress: []string{"101 Second Street"}, + Organization: []string{"HashiCorp Inc."}, + CommonName: name, + }, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().AddDate(0, 0, days), + NotBefore: time.Now(), + AuthorityKeyId: id, + SubjectKeyId: id, + } + + if len(opts.PermittedDNSDomains) > 0 { + template.PermittedDNSDomainsCritical = true + template.PermittedDNSDomains = opts.PermittedDNSDomains + } + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, signer.Public(), signer) + if err != nil { + return "", "", fmt.Errorf("error generating CA certificate: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return "", "", fmt.Errorf("error encoding 
private key: %s", err) + } + + return buf.String(), pk, nil +} + +// GenerateCert generates a new certificate for agent TLS (not to be confused with Connect TLS) +func GenerateCert(opts CertOpts) (string, string, error) { + parent, err := parseCert(opts.CA) + if err != nil { + return "", "", err + } + + signee, pk, err := GeneratePrivateKey() + if err != nil { + return "", "", err + } + + id, err := keyID(signee.Public()) + if err != nil { + return "", "", err + } + + sn := opts.Serial + if sn == nil { + var err error + sn, err = GenerateSerialNumber() + if err != nil { + return "", "", err + } + } + + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: opts.Name}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: opts.ExtKeyUsage, + IsCA: false, + NotAfter: time.Now().AddDate(0, 0, opts.Days), + NotBefore: time.Now(), + SubjectKeyId: id, + DNSNames: opts.DNSNames, + IPAddresses: opts.IPAddresses, + } + + bs, err := x509.CreateCertificate(rand.Reader, &template, parent, signee.Public(), opts.Signer) + if err != nil { + return "", "", err + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return "", "", fmt.Errorf("error encoding private key: %s", err) + } + + return buf.String(), pk, nil +} + +// KeyId returns a x509 KeyId from the given signing key. +func keyID(raw interface{}) ([]byte, error) { + switch raw.(type) { + case *ecdsa.PublicKey: + case *rsa.PublicKey: + default: + return nil, fmt.Errorf("invalid key type: %T", raw) + } + + // This is not standard; RFC allows any unique identifier as long as they + // match in subject/authority chains but suggests specific hashing of DER + // bytes of public key including DER tags. + bs, err := x509.MarshalPKIXPublicKey(raw) + if err != nil { + return nil, err + } + + // String formatted + kID := sha256.Sum256(bs) + return kID[:], nil +} + +// ParseCert parses the x509 certificate from a PEM-encoded value. +func ParseCert(pemValue string) (*x509.Certificate, error) { + // The _ result below is not an error but the remaining PEM bytes. + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("first PEM-block should be CERTIFICATE type") + } + + return x509.ParseCertificate(block.Bytes) +} + +func parseCert(pemValue string) (*x509.Certificate, error) { + // The _ result below is not an error but the remaining PEM bytes. + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("first PEM-block should be CERTIFICATE type") + } + + return x509.ParseCertificate(block.Bytes) +} + +// ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key +// is expected to be the first block in the PEM value. +func ParseSigner(pemValue string) (crypto.Signer, error) { + // The _ result below is not an error but the remaining PEM bytes. 
+ block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + switch block.Type { + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + + case "PRIVATE KEY": + signer, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + pk, ok := signer.(crypto.Signer) + if !ok { + return nil, fmt.Errorf("private key is not a valid format") + } + + return pk, nil + + default: + return nil, fmt.Errorf("unknown PEM block type for signing key: %s", block.Type) + } +} + +func Verify(caString, certString, dns string) error { + roots := x509.NewCertPool() + ok := roots.AppendCertsFromPEM([]byte(caString)) + if !ok { + return fmt.Errorf("failed to parse root certificate") + } + + cert, err := parseCert(certString) + if err != nil { + return fmt.Errorf("failed to parse certificate") + } + + opts := x509.VerifyOptions{ + DNSName: fmt.Sprint(dns), + Roots: roots, + } + + _, err = cert.Verify(opts) + return err +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go b/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go index 0c716a56..fe99ee73 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go +++ b/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go @@ -79,6 +79,7 @@ func Alloc() *structs.Allocation { ClientStatus: structs.AllocClientStatusPending, } alloc.JobID = alloc.Job.ID + alloc.Canonicalize() return alloc } diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/acl.go b/vendor/github.com/hashicorp/nomad/nomad/structs/acl.go index 72d1b765..079ac494 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/acl.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/acl.go @@ -2,6 +2,7 @@ package structs import ( "bytes" + "encoding/json" "errors" "fmt" "regexp" @@ -251,6 +252,57 @@ func (a *ACLToken) HasRoles(roleIDs []string) bool { return true } +// MarshalJSON implements the json.Marshaler interface and allows +// ACLToken.ExpirationTTL to be marshaled correctly. +func (a *ACLToken) MarshalJSON() ([]byte, error) { + type Alias ACLToken + exported := &struct { + ExpirationTTL string + *Alias + }{ + ExpirationTTL: a.ExpirationTTL.String(), + Alias: (*Alias)(a), + } + if a.ExpirationTTL == 0 { + exported.ExpirationTTL = "" + } + return json.Marshal(exported) +} + +// UnmarshalJSON implements the json.Unmarshaler interface and allows +// ACLToken.ExpirationTTL to be unmarshalled correctly. +func (a *ACLToken) UnmarshalJSON(data []byte) (err error) { + type Alias ACLToken + aux := &struct { + ExpirationTTL interface{} + Hash string + *Alias + }{ + Alias: (*Alias)(a), + } + + if err = json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpirationTTL != nil { + switch v := aux.ExpirationTTL.(type) { + case string: + if v != "" { + if a.ExpirationTTL, err = time.ParseDuration(v); err != nil { + return err + } + } + case float64: + a.ExpirationTTL = time.Duration(v) + } + + } + if aux.Hash != "" { + a.Hash = []byte(aux.Hash) + } + return nil +} + // ACLRole is an abstraction for the ACL system which allows the grouping of // ACL policies into a single object. ACL tokens can be created and linked to // a role; the token then inherits all the permissions granted by the policies. 
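
The ACLToken JSON methods in the acl.go hunk above rely on the alias-type trick so ExpirationTTL round-trips as a human-readable duration string rather than raw nanoseconds. The standalone sketch below illustrates only that pattern; the Token type, its fields, and the main function are illustrative stand-ins, not Nomad's actual structs.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Token is an illustrative stand-in for a struct with a time.Duration field
// that should marshal as a duration string ("24h0m0s") instead of nanoseconds.
type Token struct {
	Name          string
	ExpirationTTL time.Duration
}

// MarshalJSON renders ExpirationTTL as a duration string, or "" when unset.
func (t *Token) MarshalJSON() ([]byte, error) {
	type Alias Token // Alias drops the methods, avoiding infinite recursion.
	out := &struct {
		ExpirationTTL string
		*Alias
	}{
		ExpirationTTL: t.ExpirationTTL.String(),
		Alias:         (*Alias)(t),
	}
	if t.ExpirationTTL == 0 {
		out.ExpirationTTL = ""
	}
	return json.Marshal(out)
}

// UnmarshalJSON accepts either a duration string ("30m") or a float64
// nanosecond count, mirroring the tolerant decoding in the vendored code.
func (t *Token) UnmarshalJSON(data []byte) error {
	type Alias Token
	aux := &struct {
		ExpirationTTL interface{}
		*Alias
	}{Alias: (*Alias)(t)}
	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}
	switch v := aux.ExpirationTTL.(type) {
	case string:
		if v == "" {
			return nil
		}
		d, err := time.ParseDuration(v)
		if err != nil {
			return err
		}
		t.ExpirationTTL = d
	case float64:
		t.ExpirationTTL = time.Duration(v)
	}
	return nil
}

func main() {
	in := &Token{Name: "example", ExpirationTTL: 24 * time.Hour}
	b, _ := json.Marshal(in)
	fmt.Println(string(b)) // {"ExpirationTTL":"24h0m0s","Name":"example"}

	var out Token
	_ = json.Unmarshal([]byte(`{"Name":"example","ExpirationTTL":"30m"}`), &out)
	fmt.Println(out.ExpirationTTL) // 30m0s
}
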
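
The new helper/tlsutil/generate.go added earlier in this patch exposes GenerateCA, GenerateCert, ParseSigner, and Verify. The following is a hedged sketch of how those helpers appear to compose, based only on the signatures in this diff; the CA name, SANs, and validity periods are arbitrary example values, not anything Nomad itself configures.

package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/hashicorp/nomad/helper/tlsutil"
)

func main() {
	// Self-signed agent CA; GenerateCA defaults Days to 365 when left zero.
	caPEM, caKeyPEM, err := tlsutil.GenerateCA(tlsutil.CAOpts{
		Name: "Example Nomad Agent CA",
		Days: 365,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The CA key signs leaf certificates, so recover a crypto.Signer from it.
	signer, err := tlsutil.ParseSigner(caKeyPEM)
	if err != nil {
		log.Fatal(err)
	}

	// Leaf certificate for a server, using the "server.<region>.nomad" naming
	// convention seen in OutgoingTLSWrapper above. The leaf key is discarded
	// here; real callers would persist it alongside the certificate.
	certPEM, _, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          caPEM,
		Name:        "server.global.nomad",
		Days:        30,
		DNSNames:    []string{"server.global.nomad", "localhost"},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Verify checks the leaf against the CA for the given DNS name.
	if err := tlsutil.Verify(caPEM, certPEM, "server.global.nomad"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("certificate chain verifies")
}
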
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go index 63758a0b..5763ce3a 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go @@ -1,6 +1,10 @@ package structs -import "fmt" +import ( + "fmt" + + "golang.org/x/exp/slices" +) // Bitmap is a simple uncompressed bitmap type Bitmap []byte @@ -76,3 +80,25 @@ func (b Bitmap) IndexesInRange(set bool, from, to uint) []int { return indexes } + +// IndexesInRangeFiltered returns the indexes in which the values are either set +// or unset based on the passed parameter in the passed range, and do not appear +// in the filter slice +func (b Bitmap) IndexesInRangeFiltered(set bool, from, to uint, filter []int) []int { + var indexes []int + for i := from; i <= to && i < b.Size(); i++ { + c := b.Check(i) + if c == set { + if len(filter) < 1 || !slices.Contains(filter, int(i)) { + indexes = append(indexes, int(i)) + } + } + } + + return indexes +} + +// String represents the Bitmap the same as slice of the Bitmap's set values +func (b Bitmap) String() string { + return fmt.Sprintf("%v", b.IndexesInRange(true, 0, b.Size())) +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/config/artifact.go b/vendor/github.com/hashicorp/nomad/nomad/structs/config/artifact.go index f38b29a1..8edcd1ae 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/config/artifact.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/config/artifact.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/nomad/helper/pointer" ) -// ArtifactConfig is the configuration specific to the Artifact stanza +// ArtifactConfig is the configuration specific to the Artifact block type ArtifactConfig struct { // HTTPReadTimeout is the duration in which a download must complete or // it will be canceled. Defaults to 30m. diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go b/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go index bddd7947..cddbdb54 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go @@ -102,6 +102,11 @@ type ConsulConfig struct { // Uses Consul's default and env var. VerifySSL *bool `hcl:"verify_ssl"` + // GRPCCAFile is the path to the ca certificate used for Consul gRPC communication. + // + // Uses Consul's default and env var. + GRPCCAFile string `hcl:"grpc_ca_file"` + // CAFile is the path to the ca certificate used for Consul communication. // // Uses Consul's default and env var. @@ -219,6 +224,9 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { if b.ShareSSL != nil { result.ShareSSL = pointer.Of(*b.ShareSSL) } + if b.GRPCCAFile != "" { + result.GRPCCAFile = b.GRPCCAFile + } if b.CAFile != "" { result.CAFile = b.CAFile } diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/csi.go b/vendor/github.com/hashicorp/nomad/nomad/structs/csi.go index 2f2d3423..5e4c8996 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/csi.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/csi.go @@ -20,11 +20,11 @@ const CSISocketName = "csi.sock" // where Nomad will expect plugins to create intermediary mounts for volumes. 
const CSIIntermediaryDirname = "volumes" -// VolumeTypeCSI is the type in the volume stanza of a TaskGroup +// VolumeTypeCSI is the type in the volume block of a TaskGroup const VolumeTypeCSI = "csi" // CSIPluginType is an enum string that encapsulates the valid options for a -// CSIPlugin stanza's Type. These modes will allow the plugin to be used in +// CSIPlugin block's Type. These modes will allow the plugin to be used in // different ways by the client. type CSIPluginType string diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go index 81ae54cc..254071df 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go @@ -7,6 +7,7 @@ import ( "sync" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) const ( @@ -497,11 +498,12 @@ func incIP(ip net.IP) { } // AssignPorts based on an ask from the scheduler processing a group.network -// stanza. Supports multi-interfaces through node configured host_networks. +// block. Supports multi-interfaces through node configured host_networks. // -// AssignTaskNetwork supports the deprecated task.resources.network stanza. +// AssignTaskNetwork supports the deprecated task.resources.network block. func (idx *NetworkIndex) AssignPorts(ask *NetworkResource) (AllocatedPorts, error) { var offer AllocatedPorts + var portsInOffer []int // index of host network name to slice of reserved ports, used during dynamic port assignment reservedIdx := map[string][]Port{} @@ -543,6 +545,7 @@ func (idx *NetworkIndex) AssignPorts(ask *NetworkResource) (AllocatedPorts, erro } offer = append(offer, *allocPort) + portsInOffer = append(portsInOffer, allocPort.Value) } for _, port := range ask.DynamicPorts { @@ -554,10 +557,14 @@ func (idx *NetworkIndex) AssignPorts(ask *NetworkResource) (AllocatedPorts, erro // lower memory usage. var dynPorts []int // TODO: its more efficient to find multiple dynamic ports at once - dynPorts, addrErr = getDynamicPortsStochastic(used, idx.MinDynamicPort, idx.MaxDynamicPort, reservedIdx[port.HostNetwork], 1) + dynPorts, addrErr = getDynamicPortsStochastic( + used, portsInOffer, idx.MinDynamicPort, idx.MaxDynamicPort, + reservedIdx[port.HostNetwork], 1) if addrErr != nil { // Fall back to the precise method if the random sampling failed. - dynPorts, addrErr = getDynamicPortsPrecise(used, idx.MinDynamicPort, idx.MaxDynamicPort, reservedIdx[port.HostNetwork], 1) + dynPorts, addrErr = getDynamicPortsPrecise(used, portsInOffer, + idx.MinDynamicPort, idx.MaxDynamicPort, + reservedIdx[port.HostNetwork], 1) if addrErr != nil { continue } @@ -583,6 +590,7 @@ func (idx *NetworkIndex) AssignPorts(ask *NetworkResource) (AllocatedPorts, erro return nil, fmt.Errorf("no addresses available for %s network", port.HostNetwork) } offer = append(offer, *allocPort) + portsInOffer = append(portsInOffer, allocPort.Value) } return offer, nil @@ -641,13 +649,15 @@ func (idx *NetworkIndex) AssignTaskNetwork(ask *NetworkResource) (out *NetworkRe // lower memory usage. var dynPorts []int var dynErr error - dynPorts, dynErr = getDynamicPortsStochastic(used, idx.MinDynamicPort, idx.MaxDynamicPort, ask.ReservedPorts, len(ask.DynamicPorts)) + dynPorts, dynErr = getDynamicPortsStochastic(used, nil, + idx.MinDynamicPort, idx.MaxDynamicPort, ask.ReservedPorts, len(ask.DynamicPorts)) if dynErr == nil { goto BUILD_OFFER } // Fall back to the precise method if the random sampling failed. 
- dynPorts, dynErr = getDynamicPortsPrecise(used, idx.MinDynamicPort, idx.MaxDynamicPort, ask.ReservedPorts, len(ask.DynamicPorts)) + dynPorts, dynErr = getDynamicPortsPrecise(used, nil, + idx.MinDynamicPort, idx.MaxDynamicPort, ask.ReservedPorts, len(ask.DynamicPorts)) if dynErr != nil { err = dynErr return @@ -673,10 +683,11 @@ func (idx *NetworkIndex) AssignTaskNetwork(ask *NetworkResource) (out *NetworkRe } // getDynamicPortsPrecise takes the nodes used port bitmap which may be nil if -// no ports have been allocated yet, the network ask and returns a set of unused -// ports to fulfil the ask's DynamicPorts or an error if it failed. An error -// means the ask can not be satisfied as the method does a precise search. -func getDynamicPortsPrecise(nodeUsed Bitmap, minDynamicPort, maxDynamicPort int, reserved []Port, numDyn int) ([]int, error) { +// no ports have been allocated yet, any ports already offered in the caller, +// and the network ask. It returns a set of unused ports to fulfil the ask's +// DynamicPorts or an error if it failed. An error means the ask can not be +// satisfied as the method does a precise search. +func getDynamicPortsPrecise(nodeUsed Bitmap, portsInOffer []int, minDynamicPort, maxDynamicPort int, reserved []Port, numDyn int) ([]int, error) { // Create a copy of the used ports and apply the new reserves var usedSet Bitmap var err error @@ -696,8 +707,10 @@ func getDynamicPortsPrecise(nodeUsed Bitmap, minDynamicPort, maxDynamicPort int, usedSet.Set(uint(port.Value)) } - // Get the indexes of the unset - availablePorts := usedSet.IndexesInRange(false, uint(minDynamicPort), uint(maxDynamicPort)) + // Get the indexes of the unset ports, less those which have already been + // picked as part of this offer + availablePorts := usedSet.IndexesInRangeFiltered( + false, uint(minDynamicPort), uint(maxDynamicPort), portsInOffer) // Randomize the amount we need if len(availablePorts) < numDyn { @@ -713,12 +726,13 @@ func getDynamicPortsPrecise(nodeUsed Bitmap, minDynamicPort, maxDynamicPort int, return availablePorts[:numDyn], nil } -// getDynamicPortsStochastic takes the nodes used port bitmap which may be nil if -// no ports have been allocated yet, the network ask and returns a set of unused -// ports to fulfil the ask's DynamicPorts or an error if it failed. An error -// does not mean the ask can not be satisfied as the method has a fixed amount -// of random probes and if these fail, the search is aborted. -func getDynamicPortsStochastic(nodeUsed Bitmap, minDynamicPort, maxDynamicPort int, reservedPorts []Port, count int) ([]int, error) { +// getDynamicPortsStochastic takes the nodes used port bitmap which may be nil +// if no ports have been allocated yet, any ports already offered in the caller, +// and the network ask. It returns a set of unused ports to fulfil the ask's +// DynamicPorts or an error if it failed. An error does not mean the ask can not +// be satisfied as the method has a fixed amount of random probes and if these +// fail, the search is aborted. 
+func getDynamicPortsStochastic(nodeUsed Bitmap, portsInOffer []int, minDynamicPort, maxDynamicPort int, reservedPorts []Port, count int) ([]int, error) { var reserved, dynamic []int for _, port := range reservedPorts { reserved = append(reserved, port.Value) @@ -742,6 +756,12 @@ func getDynamicPortsStochastic(nodeUsed Bitmap, minDynamicPort, maxDynamicPort i goto PICK } } + // the pick conflicted with a previous pick that hasn't been saved to + // the index yet + if slices.Contains(portsInOffer, randPort) { + goto PICK + } + dynamic = append(dynamic, randPort) } diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go index a6cfced9..667890ee 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go @@ -135,7 +135,7 @@ func (a *AutopilotConfig) Copy() *AutopilotConfig { } // SchedulerAlgorithm is an enum string that encapsulates the valid options for a -// SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the +// SchedulerConfiguration block's SchedulerAlgorithm. These modes will allow the // scheduler to be user-selectable. type SchedulerAlgorithm string diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/services.go b/vendor/github.com/hashicorp/nomad/nomad/structs/services.go index b4ff34c0..f0938088 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/services.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/services.go @@ -84,7 +84,7 @@ func (sc *ServiceCheck) IsReadiness() bool { return sc != nil && sc.OnUpdate == OnUpdateIgnore } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (sc *ServiceCheck) Copy() *ServiceCheck { if sc == nil { return nil @@ -595,7 +595,7 @@ type Service struct { Provider string } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (s *Service) Copy() *Service { if s == nil { return nil @@ -952,7 +952,7 @@ func (s *Service) Equals(o *Service) bool { return true } -// ConsulConnect represents a Consul Connect jobspec stanza. +// ConsulConnect represents a Consul Connect jobspec block. type ConsulConnect struct { // Native indicates whether the service is Consul Connect Native enabled. Native bool @@ -967,7 +967,7 @@ type ConsulConnect struct { Gateway *ConsulGateway } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (c *ConsulConnect) Copy() *ConsulConnect { if c == nil { return nil @@ -1084,7 +1084,7 @@ func (c *ConsulConnect) Validate() error { } // ConsulSidecarService represents a Consul Connect SidecarService jobspec -// stanza. +// block. type ConsulSidecarService struct { // Tags are optional service tags that get registered with the sidecar service // in Consul. If unset, the sidecar service inherits the parent service tags. @@ -1094,7 +1094,7 @@ type ConsulSidecarService struct { // a port label or a literal port number. Port string - // Proxy stanza defining the sidecar proxy configuration. + // Proxy block defining the sidecar proxy configuration. Proxy *ConsulProxy // DisableDefaultTCPCheck, if true, instructs Nomad to avoid setting a @@ -1107,7 +1107,7 @@ func (s *ConsulSidecarService) HasUpstreams() bool { return s != nil && s.Proxy != nil && len(s.Proxy.Upstreams) > 0 } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. 
Returns nil if nil. func (s *ConsulSidecarService) Copy() *ConsulSidecarService { if s == nil { return nil @@ -1142,7 +1142,7 @@ func (s *ConsulSidecarService) Equals(o *ConsulSidecarService) bool { } // SidecarTask represents a subset of Task fields that are able to be overridden -// from the sidecar_task stanza +// from the sidecar_task block type SidecarTask struct { // Name of the task Name string @@ -1336,7 +1336,7 @@ func (t *SidecarTask) MergeIntoTask(task *Task) { } } -// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza. +// ConsulProxy represents a Consul Connect sidecar proxy jobspec block. type ConsulProxy struct { // LocalServiceAddress is the address the local service binds to. @@ -1353,18 +1353,16 @@ type ConsulProxy struct { // connect to. Upstreams []ConsulUpstream - // Expose configures the consul proxy.expose stanza to "open up" endpoints + // Expose configures the consul proxy.expose block to "open up" endpoints // used by task-group level service checks using HTTP or gRPC protocols. - // - // Use json tag to match with field name in api/ - Expose *ConsulExposeConfig `json:"ExposeConfig"` + Expose *ConsulExposeConfig // Config is a proxy configuration. It is opaque to Nomad and passed // directly to Consul. Config map[string]interface{} } -// Copy the stanza recursively. Returns nil if nil. +// Copy the block recursively. Returns nil if nil. func (p *ConsulProxy) Copy() *ConsulProxy { if p == nil { return nil @@ -1459,7 +1457,7 @@ func (c *ConsulMeshGateway) Validate() error { } } -// ConsulUpstream represents a Consul Connect upstream jobspec stanza. +// ConsulUpstream represents a Consul Connect upstream jobspec block. type ConsulUpstream struct { // DestinationName is the name of the upstream service. DestinationName string @@ -1495,10 +1493,9 @@ func upstreamsEquals(a, b []ConsulUpstream) bool { return set.From(a).Equal(set.From(b)) } -// ConsulExposeConfig represents a Consul Connect expose jobspec stanza. +// ConsulExposeConfig represents a Consul Connect expose jobspec block. type ConsulExposeConfig struct { - // Use json tag to match with field name in api/ - Paths []ConsulExposePath `json:"Path"` + Paths []ConsulExposePath } type ConsulExposePath struct { @@ -1512,7 +1509,7 @@ func exposePathsEqual(a, b []ConsulExposePath) bool { return helper.SliceSetEq(a, b) } -// Copy the stanza. Returns nil if e is nil. +// Copy the block. Returns nil if e is nil. func (e *ConsulExposeConfig) Copy() *ConsulExposeConfig { if e == nil { return nil @@ -1874,13 +1871,13 @@ func (s *ConsulIngressService) Validate(protocol string) error { return nil } + // pre-validate service Name and Hosts before passing along to consul: + // https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway#services + if s.Name == "" { return errors.New("Consul Ingress Service requires a name") } - // Validation of wildcard service name and hosts varies depending on the - // protocol for the gateway. 
- // https://www.consul.io/docs/connect/config-entries/ingress-gateway#hosts switch protocol { case "tcp": if s.Name == "*" { @@ -1891,12 +1888,8 @@ func (s *ConsulIngressService) Validate(protocol string) error { return errors.New(`Consul Ingress Service doesn't support associating hosts to a service for the "tcp" protocol`) } default: - if s.Name == "*" { - return nil - } - - if len(s.Hosts) == 0 { - return fmt.Errorf("Consul Ingress Service requires one or more hosts when using %q protocol", protocol) + if s.Name == "*" && len(s.Hosts) != 0 { + return errors.New(`Consul Ingress Service with a wildcard "*" service name can not also specify hosts`) } } diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go index 86bbad78..5d8beb2e 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go @@ -80178,9 +80178,9 @@ func (x *JobRegisterRequest) CodecEncodeSelf(e *codec1978.Encoder) { _, _ = yysep2, yy2arr2 const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { - r.WriteArrayStart(12) + r.WriteArrayStart(13) } else { - r.WriteMapStart(12) + r.WriteMapStart(13) } var yyn3 bool if x.Job == nil { @@ -80349,6 +80349,42 @@ func (x *JobRegisterRequest) CodecEncodeSelf(e *codec1978.Encoder) { } } } + var yyn24 bool + if x.Deployment == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.Deployment == nil { + r.EncodeNil() + } else { + x.Deployment.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + if z.IsJSONHandle() { + z.WriteStr("\"Deployment\"") + } else { + r.EncodeStringEnc(codecSelferCcUTF8100, `Deployment`) + } + r.WriteMapElemValue() + if yyn24 { + r.EncodeNil() + } else { + if x.Deployment == nil { + r.EncodeNil() + } else { + x.Deployment.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { r.WriteArrayElem() if false { @@ -80587,6 +80623,18 @@ func (x *JobRegisterRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) x.Eval.CodecDecodeSelf(d) } + case "Deployment": + if r.TryDecodeAsNil() { + if true && x.Deployment != nil { + x.Deployment = nil + } + } else { + if x.Deployment == nil { + x.Deployment = new(Deployment) + } + + x.Deployment.CodecDecodeSelf(d) + } case "Region": if r.TryDecodeAsNil() { x.WriteRequest.Region = "" @@ -80628,16 +80676,16 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer100 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80653,13 +80701,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode x.Job.CodecDecodeSelf(d) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80669,13 +80717,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.EnforceIndex = (bool)(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + 
yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80685,13 +80733,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.JobModifyIndex = (uint64)(r.DecodeUint64()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80701,13 +80749,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.PreserveCounts = (bool)(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80717,13 +80765,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.PolicyOverride = (bool)(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80733,13 +80781,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.EvalPriority = (int)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize100)) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80755,13 +80803,35 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode x.Eval.CodecDecodeSelf(d) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if true && x.Deployment != nil { + x.Deployment = nil + } + } else { + if x.Deployment == nil { + x.Deployment = new(Deployment) + } + + x.Deployment.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { r.ReadArrayEnd() return } @@ -80771,13 +80841,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.Region = (string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80787,13 +80857,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.Namespace = (string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80803,13 +80873,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.AuthToken = (string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80819,13 +80889,13 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.IdempotencyToken = 
(string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { r.ReadArrayEnd() return } @@ -80836,17 +80906,17 @@ func (x *JobRegisterRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decode x.Forwarded = (bool)(r.DecodeBool()) } for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l } else { - yyb16 = r.CheckBreak() + yyb17 = r.CheckBreak() } - if yyb16 { + if yyb17 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj16-1, "") + z.DecStructFieldNotFound(yyj17-1, "") } r.ReadArrayEnd() } @@ -131294,9 +131364,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { _, _ = yysep2, yy2arr2 const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { - r.WriteArrayStart(30) + r.WriteArrayStart(32) } else { - r.WriteMapStart(30) + r.WriteMapStart(32) } if yyr2 || yy2arr2 { r.WriteArrayElem() @@ -132092,6 +132162,44 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeUint(uint64(x.LastMissedHeartbeatIndex)) + } + } else { + r.WriteMapElemKey() + if z.IsJSONHandle() { + z.WriteStr("\"LastMissedHeartbeatIndex\"") + } else { + r.EncodeStringEnc(codecSelferCcUTF8100, `LastMissedHeartbeatIndex`) + } + r.WriteMapElemValue() + if false { + } else { + r.EncodeUint(uint64(x.LastMissedHeartbeatIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeUint(uint64(x.LastAllocUpdateIndex)) + } + } else { + r.WriteMapElemKey() + if z.IsJSONHandle() { + z.WriteStr("\"LastAllocUpdateIndex\"") + } else { + r.EncodeStringEnc(codecSelferCcUTF8100, `LastAllocUpdateIndex`) + } + r.WriteMapElemValue() + if false { + } else { + r.EncodeUint(uint64(x.LastAllocUpdateIndex)) + } + } if yyr2 || yy2arr2 { r.WriteArrayElem() if false { @@ -132418,6 +132526,18 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.LastDrain.CodecDecodeSelf(d) } + case "LastMissedHeartbeatIndex": + if r.TryDecodeAsNil() { + x.LastMissedHeartbeatIndex = 0 + } else { + x.LastMissedHeartbeatIndex = (uint64)(r.DecodeUint64()) + } + case "LastAllocUpdateIndex": + if r.TryDecodeAsNil() { + x.LastAllocUpdateIndex = 0 + } else { + x.LastAllocUpdateIndex = (uint64)(r.DecodeUint64()) + } case "CreateIndex": if r.TryDecodeAsNil() { x.CreateIndex = 0 @@ -132441,16 +132561,16 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer100 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj43 int - var yyb43 bool - var yyhl43 bool = l >= 0 - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + var yyj45 int + var yyb45 bool + var yyhl45 bool = l >= 0 + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132460,13 +132580,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ID = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132476,13 +132596,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SecretID = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l 
} else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132492,13 +132612,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Datacenter = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132508,13 +132628,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Name = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132524,13 +132644,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.CgroupParent = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132540,13 +132660,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.HTTPAddr = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132556,13 +132676,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.TLSEnabled = (bool)(r.DecodeBool()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132575,13 +132695,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.F.DecMapStringStringX(&x.Attributes, d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132597,13 +132717,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.NodeResources.CodecDecodeSelf(d) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132619,13 +132739,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.ReservedResources.CodecDecodeSelf(d) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132641,13 +132761,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.Resources.CodecDecodeSelf(d) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132663,13 +132783,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.Reserved.CodecDecodeSelf(d) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return 
} @@ -132682,13 +132802,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.F.DecMapStringStringX(&x.Links, d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132701,13 +132821,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.F.DecMapStringStringX(&x.Meta, d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132717,13 +132837,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.NodeClass = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132733,13 +132853,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ComputedClass = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132755,13 +132875,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.DrainStrategy.CodecDecodeSelf(d) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132771,13 +132891,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SchedulingEligibility = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132787,13 +132907,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Status = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132803,13 +132923,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.StatusDescription = (string)(r.DecodeString()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132819,13 +132939,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.StatusUpdatedAt = (int64)(r.DecodeInt64()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132838,13 +132958,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { h.decSlicePtrtoNodeEvent((*[]*NodeEvent)(&x.Events), d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132857,13 +132977,13 @@ func (x 
*Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { h.decMapstringPtrtoDriverInfo((*map[string]*DriverInfo)(&x.Drivers), d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132876,13 +132996,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { h.decMapstringPtrtoCSIInfo((*map[string]*CSIInfo)(&x.CSIControllerPlugins), d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132895,13 +133015,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { h.decMapstringPtrtoCSIInfo((*map[string]*CSIInfo)(&x.CSINodePlugins), d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132914,13 +133034,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { h.decMapstringPtrtoClientHostVolumeConfig((*map[string]*ClientHostVolumeConfig)(&x.HostVolumes), d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132933,13 +133053,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { h.decMapstringPtrtoClientHostNetworkConfig((*map[string]*ClientHostNetworkConfig)(&x.HostNetworks), d) } } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132955,13 +133075,45 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.LastDrain.CodecDecodeSelf(d) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l + } else { + yyb45 = r.CheckBreak() + } + if yyb45 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.LastMissedHeartbeatIndex = 0 + } else { + x.LastMissedHeartbeatIndex = (uint64)(r.DecodeUint64()) + } + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l + } else { + yyb45 = r.CheckBreak() + } + if yyb45 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.LastAllocUpdateIndex = 0 + } else { + x.LastAllocUpdateIndex = (uint64)(r.DecodeUint64()) + } + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132971,13 +133123,13 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.CreateIndex = (uint64)(r.DecodeUint64()) } - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { r.ReadArrayEnd() return } @@ -132988,17 +133140,17 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.ModifyIndex = (uint64)(r.DecodeUint64()) } for { - yyj43++ - if yyhl43 { - yyb43 = yyj43 > l + yyj45++ + if yyhl45 { + yyb45 = yyj45 > l } else { - yyb43 = r.CheckBreak() + yyb45 = r.CheckBreak() } - if yyb43 { + if yyb45 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj43-1, "") + 
z.DecStructFieldNotFound(yyj45-1, "") } r.ReadArrayEnd() } @@ -186900,6 +187052,8 @@ func (x *ACLToken) CodecEncodeSelf(e *codec1978.Encoder) { if false { } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { z.EncExtension(x, yyxt1) + } else if !z.EncBinary() && z.IsJSONHandle() { + z.EncJSONMarshal(x) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray @@ -187290,6 +187444,8 @@ func (x *ACLToken) CodecDecodeSelf(d *codec1978.Decoder) { if false { } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { z.DecExtension(x, yyxt1) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x) } else { yyct2 := r.ContainerType() if yyct2 == codecSelferValueTypeMap100 { diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go index 5e3abb66..dbf68ef2 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go @@ -612,6 +612,10 @@ type JobRegisterRequest struct { // Eval is the evaluation that is associated with the job registration Eval *Evaluation + // Deployment is the deployment to be create when the job is registered. If + // there is an active deployment for the job it will be canceled. + Deployment *Deployment + WriteRequest } @@ -2017,6 +2021,14 @@ type Node struct { // LastDrain contains metadata about the most recent drain operation LastDrain *DrainMetadata + // LastMissedHeartbeatIndex stores the Raft index when the node last missed + // a heartbeat. It resets to zero once the node is marked as ready again. + LastMissedHeartbeatIndex uint64 + + // LastAllocUpdateIndex stores the Raft index of the last time the node + // updatedd its allocations status. + LastAllocUpdateIndex uint64 + // Raft Indexes CreateIndex uint64 ModifyIndex uint64 @@ -2111,6 +2123,17 @@ func (n *Node) Copy() *Node { return &nn } +// UnresponsiveStatus returns true if the node is a status where it is not +// communicating with the server. +func (n *Node) UnresponsiveStatus() bool { + switch n.Status { + case NodeStatusDown, NodeStatusDisconnected: + return true + default: + return false + } +} + // TerminalStatus returns if the current status is terminal and // will no longer transition. func (n *Node) TerminalStatus() bool { @@ -2578,7 +2601,7 @@ func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) { } type Port struct { - // Label is the key for HCL port stanzas: port "foo" {} + // Label is the key for HCL port blocks: port "foo" {} Label string // Value is the static or dynamic port value. For dynamic ports this @@ -2890,7 +2913,7 @@ type NodeResources struct { // Networks is the node's bridge network and default interface. It is // only used when scheduling jobs with a deprecated - // task.resources.network stanza. + // task.resources.network block. Networks Networks // MinDynamicPort and MaxDynamicPort represent the inclusive port range @@ -4123,7 +4146,7 @@ type Job struct { TaskGroups []*TaskGroup // See agent.ApiJobToStructJob - // Update provides defaults for the TaskGroup Update stanzas + // Update provides defaults for the TaskGroup Update blocks Update UpdateStrategy Multiregion *Multiregion @@ -4182,7 +4205,7 @@ type Job struct { // of a deployment and can be manually set via APIs. This field is updated // when the status of a corresponding deployment transitions to Failed // or Successful. This field is not meaningful for jobs that don't have an - // update stanza. + // update block. 
Stable bool // Version is a monotonically increasing version number that is incremented @@ -4348,7 +4371,7 @@ func (j *Job) Validate() error { } if j.Type == JobTypeSystem { if j.Affinities != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range j.Affinities { @@ -4361,7 +4384,7 @@ func (j *Job) Validate() error { if j.Type == JobTypeSystem { if j.Spreads != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block")) } } else { for idx, spread := range j.Spreads { @@ -6320,15 +6343,27 @@ func (tg *TaskGroup) Canonicalize(job *Job) { // NomadServices returns a list of all group and task - level services in tg that // are making use of the nomad service provider. func (tg *TaskGroup) NomadServices() []*Service { + return tg.filterServices(func(s *Service) bool { + return s.Provider == ServiceProviderNomad + }) +} + +func (tg *TaskGroup) ConsulServices() []*Service { + return tg.filterServices(func(s *Service) bool { + return s.Provider == ServiceProviderConsul || s.Provider == "" + }) +} + +func (tg *TaskGroup) filterServices(f func(s *Service) bool) []*Service { var services []*Service for _, service := range tg.Services { - if service.Provider == ServiceProviderNomad { + if f(service) { services = append(services, service) } } for _, task := range tg.Tasks { for _, service := range task.Services { - if service.Provider == ServiceProviderNomad { + if f(service) { services = append(services, service) } } @@ -6368,7 +6403,7 @@ func (tg *TaskGroup) Validate(j *Job) error { } if j.Type == JobTypeSystem { if tg.Affinities != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range tg.Affinities { @@ -6389,7 +6424,7 @@ func (tg *TaskGroup) Validate(j *Job) error { if j.Type == JobTypeSystem { if tg.Spreads != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block")) } } else { for idx, spread := range tg.Spreads { @@ -6475,7 +6510,7 @@ func (tg *TaskGroup) Validate(j *Job) error { canaries = tg.Update.Canary } for name, volReq := range tg.Volumes { - if err := volReq.Validate(tg.Count, canaries); err != nil { + if err := volReq.Validate(j.Type, tg.Count, canaries); err != nil { mErr.Errors = append(mErr.Errors, fmt.Errorf( "Task group volume validation for %s failed: %v", name, err)) } @@ -6730,7 +6765,7 @@ func (tg *TaskGroup) validateServices() error { mErr.Errors = append(mErr.Errors, fmt.Errorf( "Services are not unique: %s", - idDuplicateSet.String( + idDuplicateSet.StringFunc( func(u unique) string { s := u.task + "->" + u.name if u.port != "" { @@ -7265,7 +7300,7 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices if jobType == JobTypeSystem { if t.Affinities != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range t.Affinities { @@ -7335,9 +7370,9 @@ func (t *Task) 
Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices // Validation for TaskKind field which is used for Consul Connect integration if t.Kind.IsConnectProxy() { - // This task is a Connect proxy so it should not have service stanzas + // This task is a Connect proxy so it should not have service blocks if len(t.Services) > 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service block")) } if t.Leader { mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set")) @@ -7530,7 +7565,7 @@ func (t *Task) Warnings() error { // Validate the resources if t.Resources != nil && t.Resources.IOPS != 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza.")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource block.")) } if t.Resources != nil && len(t.Resources.Networks) != 0 { @@ -7857,7 +7892,7 @@ func (t *Template) Warnings() error { // Deprecation notice for vault_grace if t.VaultGrace != 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza.")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template block.")) } return mErr.ErrorOrNil() @@ -9032,7 +9067,7 @@ func (s *Spread) Validate() error { mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute")) } if s.Weight <= 0 || s.Weight > 100 { - mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 0 to 100")) + mErr.Errors = append(mErr.Errors, errors.New("Spread block must have a positive weight from 0 to 100")) } seen := make(map[string]struct{}) sumPercent := uint32(0) @@ -9219,14 +9254,15 @@ func (v *Vault) Validate() error { const ( // DeploymentStatuses are the various states a deployment can be be in - DeploymentStatusRunning = "running" - DeploymentStatusPaused = "paused" - DeploymentStatusFailed = "failed" - DeploymentStatusSuccessful = "successful" - DeploymentStatusCancelled = "cancelled" - DeploymentStatusPending = "pending" - DeploymentStatusBlocked = "blocked" - DeploymentStatusUnblocking = "unblocking" + DeploymentStatusRunning = "running" + DeploymentStatusPaused = "paused" + DeploymentStatusFailed = "failed" + DeploymentStatusSuccessful = "successful" + DeploymentStatusCancelled = "cancelled" + DeploymentStatusInitializing = "initializing" + DeploymentStatusPending = "pending" + DeploymentStatusBlocked = "blocked" + DeploymentStatusUnblocking = "unblocking" // TODO Statuses and Descriptions do not match 1:1 and we sometimes use the Description as a status flag @@ -9360,7 +9396,8 @@ func (d *Deployment) Copy() *Deployment { // Active returns whether the deployment is active or terminal. 
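As an aside on the TaskGroup service helpers refactored a little further above (NomadServices, ConsulServices, and the shared filterServices predicate): below is a minimal usage sketch, not taken from the patch. The group and service names are made up, and it assumes the vendored structs package is importable at the path shown.

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

// Sketch only: group-level services split by provider. A service with an
// empty Provider is treated as Consul, matching the refactored helper.
func main() {
	tg := &structs.TaskGroup{
		Name: "web",
		Services: []*structs.Service{
			{Name: "frontend", Provider: structs.ServiceProviderNomad},
			{Name: "backend"}, // empty provider counts as Consul
		},
	}
	fmt.Println(len(tg.NomadServices()), len(tg.ConsulServices())) // 1 1
}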
func (d *Deployment) Active() bool { switch d.Status { - case DeploymentStatusRunning, DeploymentStatusPaused, DeploymentStatusBlocked, DeploymentStatusUnblocking, DeploymentStatusPending: + case DeploymentStatusRunning, DeploymentStatusPaused, DeploymentStatusBlocked, + DeploymentStatusUnblocking, DeploymentStatusInitializing, DeploymentStatusPending: return true default: return false @@ -11771,6 +11808,7 @@ type KeyringRequest struct { type RecoverableError struct { Err string Recoverable bool + wrapped error } // NewRecoverableError is used to wrap an error and mark it as recoverable or @@ -11783,6 +11821,7 @@ func NewRecoverableError(e error, recoverable bool) error { return &RecoverableError{ Err: e.Error(), Recoverable: recoverable, + wrapped: e, } } @@ -11805,6 +11844,10 @@ func (r *RecoverableError) IsUnrecoverable() bool { return !r.Recoverable } +func (r *RecoverableError) Unwrap() error { + return r.wrapped +} + // Recoverable is an interface for errors to implement to indicate whether or // not they are fatal or recoverable. type Recoverable interface { diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/uuid.go b/vendor/github.com/hashicorp/nomad/nomad/structs/uuid.go index f983fd7b..64beeacd 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/uuid.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/uuid.go @@ -3,5 +3,5 @@ package structs // MaxUUIDsPerWriteRequest is the maximum number of UUIDs that can be included // within a single write request. This is to ensure that the Raft message does // not become too large. The resulting value corresponds to 0.25MB of IDs or -// 7282 UUID strings. -var MaxUUIDsPerWriteRequest = (1024 * 256) / 36 +// 7281 UUID strings. +const MaxUUIDsPerWriteRequest = 7281 // (1024 * 256) / 36 diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go b/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go index 0f8b040d..0b16f9b3 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go @@ -102,7 +102,7 @@ type VolumeRequest struct { PerAlloc bool } -func (v *VolumeRequest) Validate(taskGroupCount, canaries int) error { +func (v *VolumeRequest) Validate(jobType string, taskGroupCount, canaries int) error { if !(v.Type == VolumeTypeHost || v.Type == VolumeTypeCSI) { return fmt.Errorf("volume has unrecognized type %s", v.Type) @@ -170,9 +170,13 @@ func (v *VolumeRequest) Validate(taskGroupCount, canaries int) error { case CSIVolumeAccessModeMultiNodeMultiWriter: // note: we intentionally allow read-only mount of this mode } - - if v.PerAlloc && canaries > 0 { - addErr("volume cannot be per_alloc when canaries are in use") + if v.PerAlloc { + if jobType == JobTypeSystem || jobType == JobTypeSysBatch { + addErr("volume cannot be per_alloc for system or sysbatch jobs") + } + if canaries > 0 { + addErr("volume cannot be per_alloc when canaries are in use") + } } } diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go index 505591ea..616e4f78 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go @@ -405,7 +405,7 @@ type LinuxResources struct { // and thus the calculation for CPUQuota cannot be done on the client. 
// This is a capatability and should only be used by docker until the docker // specific options are deprecated in favor of exposes CPUPeriod and - // CPUQuota at the task resource stanza. + // CPUQuota at the task resource block. PercentTicks float64 } diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go index d956f718..180d5a23 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go @@ -308,7 +308,7 @@ func (m *TaskConfigSchemaRequest) XXX_DiscardUnknown() { var xxx_messageInfo_TaskConfigSchemaRequest proto.InternalMessageInfo type TaskConfigSchemaResponse struct { - // Spec is the configuration schema for the job driver config stanza + // Spec is the configuration schema for the job driver config block Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto index 955c79be..8511e61c 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto @@ -91,7 +91,7 @@ message TaskConfigSchemaRequest {} message TaskConfigSchemaResponse { - // Spec is the configuration schema for the job driver config stanza + // Spec is the configuration schema for the job driver config block hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1; } diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/exec_testing.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/exec_testing.go index c51099af..68f1078d 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/exec_testing.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/exec_testing.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "reflect" "regexp" @@ -176,7 +175,7 @@ func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) { t.Logf("created file in task: %v", tempfile) // read from host - b, err := ioutil.ReadFile(tempfile) + b, err := os.ReadFile(tempfile) if !isolated { require.NoError(t, err) require.Equal(t, text, strings.TrimSpace(string(b))) diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go index d568db14..89ff1496 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go @@ -3,7 +3,6 @@ package testutils import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -116,7 +115,7 @@ func (h *DriverHarness) cleanupCgroup() { // A cleanup func is returned and should be deferred so as to not leak dirs // between tests. 
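The two testutils hunks above swap the deprecated io/ioutil helpers for their os equivalents. A small, self-contained sketch of the same migration (the file name and contents are illustrative only):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.MkdirTemp replaces ioutil.TempDir (Go 1.16+).
	dir, err := os.MkdirTemp("", "nomad_driver_harness-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "example.txt")
	if err := os.WriteFile(path, []byte("hello"), 0o600); err != nil {
		panic(err)
	}

	// os.ReadFile replaces ioutil.ReadFile.
	b, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}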
func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func() { - dir, err := ioutil.TempDir("", "nomad_driver_harness-") + dir, err := os.MkdirTemp("", "nomad_driver_harness-") require.NoError(h.t, err) allocDir := allocdir.NewAllocDir(h.logger, dir, t.AllocID) diff --git a/vendor/github.com/hashicorp/nomad/testutil/server.go b/vendor/github.com/hashicorp/nomad/testutil/server.go index 8f6e4da3..ca0abdb9 100644 --- a/vendor/github.com/hashicorp/nomad/testutil/server.go +++ b/vendor/github.com/hashicorp/nomad/testutil/server.go @@ -16,15 +16,14 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "os" "os/exec" "time" cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/discover" - "github.com/hashicorp/nomad/helper/freeport" testing "github.com/mitchellh/go-testing-interface" ) @@ -98,8 +97,8 @@ type ServerConfigCallback func(c *TestServerConfig) // defaultServerConfig returns a new TestServerConfig struct // with all of the listen ports incremented by one. -func defaultServerConfig() (*TestServerConfig, []int) { - ports := freeport.MustTake(3) +func defaultServerConfig() *TestServerConfig { + ports := ci.PortAllocator.Grab(3) return &TestServerConfig{ NodeName: fmt.Sprintf("node-%d", ports[0]), DisableCheckpoint: true, @@ -123,7 +122,7 @@ func defaultServerConfig() (*TestServerConfig, []int) { ACL: &ACLConfig{ Enabled: false, }, - }, ports + } } // TestServer is the main server wrapper struct. @@ -132,10 +131,6 @@ type TestServer struct { Config *TestServerConfig t testing.T - // ports (if any) that are reserved through freeport that must be returned - // at the end of a test, done when Close() is called. - ports []int - HTTPAddr string SerfAddr string HTTPClient *http.Client @@ -157,19 +152,19 @@ func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer { t.Skipf("nomad version failed: %v", err) } - dataDir, err := ioutil.TempDir("", "nomad") + dataDir, err := os.MkdirTemp("", "nomad") if err != nil { t.Fatalf("err: %s", err) } - configFile, err := ioutil.TempFile(dataDir, "nomad") + configFile, err := os.CreateTemp(dataDir, "nomad") if err != nil { defer os.RemoveAll(dataDir) t.Fatalf("err: %s", err) } defer configFile.Close() - nomadConfig, ports := defaultServerConfig() + nomadConfig := defaultServerConfig() nomadConfig.DataDir = dataDir if cb != nil { @@ -216,8 +211,6 @@ func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer { cmd: cmd, t: t, - ports: ports, - HTTPAddr: fmt.Sprintf("127.0.0.1:%d", nomadConfig.Ports.HTTP), SerfAddr: fmt.Sprintf("127.0.0.1:%d", nomadConfig.Ports.Serf), HTTPClient: client, @@ -240,8 +233,6 @@ func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer { // Stop stops the test Nomad server, and removes the Nomad data // directory once we are done. 
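The testutil/server.go hunk above drops the freeport take/return bookkeeping in favor of ci.PortAllocator.Grab, so tests no longer hand ports back on Close. A rough sketch of the new call pattern follows; the helper name grabTestPorts is made up, and it assumes the vendored ci package is importable from test code as it is in the diff.

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/ci"
)

// grabTestPorts reserves three ports for a test server; unlike freeport there
// is no corresponding Return step when the test finishes.
func grabTestPorts() (httpPort, rpcPort, serfPort int) {
	ports := ci.PortAllocator.Grab(3)
	fmt.Printf("http=%d rpc=%d serf=%d\n", ports[0], ports[1], ports[2])
	return ports[0], ports[1], ports[2]
}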
func (s *TestServer) Stop() { - defer freeport.Return(s.ports) - defer os.RemoveAll(s.Config.DataDir) // wait for the process to exit to be sure that the data dir can be diff --git a/vendor/github.com/hashicorp/nomad/testutil/tls.go b/vendor/github.com/hashicorp/nomad/testutil/tls.go new file mode 100644 index 00000000..b5fa37ac --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/testutil/tls.go @@ -0,0 +1,49 @@ +package testutil + +import ( + "crypto/x509" + "io/fs" + "os" + "testing" + + "github.com/hashicorp/nomad/helper/tlsutil" + "github.com/stretchr/testify/require" +) + +// Assert CA file exists and is a valid CA Returns the CA +func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate { + t.Helper() + + require.FileExists(t, caPath) + caData, err := os.ReadFile(caPath) + require.NoError(t, err) + + ca, err := tlsutil.ParseCert(string(caData)) + require.NoError(t, err) + require.NotNil(t, ca) + + return ca +} + +// Assert key file exists and is a valid signer returns a bool +func IsValidSigner(t *testing.T, keyPath string) bool { + t.Helper() + + require.FileExists(t, keyPath) + fi, err := os.Stat(keyPath) + if err != nil { + t.Fatal("should not happen", err) + } + if want, have := fs.FileMode(0600), fi.Mode().Perm(); want != have { + t.Fatalf("private key file %s: permissions: want: %o; have: %o", keyPath, want, have) + } + + keyData, err := os.ReadFile(keyPath) + require.NoError(t, err) + + signer, err := tlsutil.ParseSigner(string(keyData)) + require.NoError(t, err) + require.NotNil(t, signer) + + return true +} diff --git a/vendor/github.com/hashicorp/nomad/testutil/vault.go b/vendor/github.com/hashicorp/nomad/testutil/vault.go index 5cd4f81d..566641cd 100644 --- a/vendor/github.com/hashicorp/nomad/testutil/vault.go +++ b/vendor/github.com/hashicorp/nomad/testutil/vault.go @@ -3,12 +3,11 @@ package testutil import ( "errors" "fmt" - "math/rand" "os" "os/exec" "time" - "github.com/hashicorp/nomad/helper/freeport" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs/config" @@ -30,10 +29,6 @@ type TestVault struct { t testing.T waitCh chan error - // ports (if any) that are reserved through freeport that must be returned - // at the end of a test, done when Stop() is called. - ports []int - Addr string HTTPAddr string RootToken string @@ -42,93 +37,70 @@ type TestVault struct { } func NewTestVaultFromPath(t testing.T, binary string) *TestVault { - var ports []int - nextPort := func() int { - next := freeport.MustTake(1) - ports = append(ports, next...) 
- return next[0] - } - - for i := 10; i >= 0; i-- { - - port := nextPort() // collect every port for cleanup after the test - - token := uuid.Generate() - bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port) - http := fmt.Sprintf("http://127.0.0.1:%d", port) - root := fmt.Sprintf("-dev-root-token-id=%s", token) - - cmd := exec.Command(binary, "server", "-dev", bind, root) - cmd.Stdout = testlog.NewWriter(t) - cmd.Stderr = testlog.NewWriter(t) + port := ci.PortAllocator.Grab(1)[0] + token := uuid.Generate() + bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port) + http := fmt.Sprintf("http://127.0.0.1:%d", port) + root := fmt.Sprintf("-dev-root-token-id=%s", token) - // Build the config - conf := vapi.DefaultConfig() - conf.Address = http + cmd := exec.Command(binary, "server", "-dev", bind, root) + cmd.Stdout = testlog.NewWriter(t) + cmd.Stderr = testlog.NewWriter(t) - // Make the client and set the token to the root token - client, err := vapi.NewClient(conf) - if err != nil { - t.Fatalf("failed to build Vault API client: %v", err) - } - client.SetToken(token) - - enable := true - tv := &TestVault{ - cmd: cmd, - t: t, - ports: ports, - Addr: bind, - HTTPAddr: http, - RootToken: token, - Client: client, - Config: &config.VaultConfig{ - Enabled: &enable, - Token: token, - Addr: http, - }, - } + // Build the config + conf := vapi.DefaultConfig() + conf.Address = http - if err := tv.cmd.Start(); err != nil { - tv.t.Fatalf("failed to start vault: %v", err) - } + // Make the client and set the token to the root token + client, err := vapi.NewClient(conf) + if err != nil { + t.Fatalf("failed to build Vault API client: %v", err) + } + client.SetToken(token) - // Start the waiter - tv.waitCh = make(chan error, 1) - go func() { - err := tv.cmd.Wait() - tv.waitCh <- err - }() + enable := true + tv := &TestVault{ + cmd: cmd, + t: t, + Addr: bind, + HTTPAddr: http, + RootToken: token, + Client: client, + Config: &config.VaultConfig{ + Enabled: &enable, + Token: token, + Addr: http, + }, + } - // Ensure Vault started - var startErr error - select { - case startErr = <-tv.waitCh: - case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond): - } + if err = tv.cmd.Start(); err != nil { + tv.t.Fatalf("failed to start vault: %v", err) + } - if startErr != nil && i == 0 { - t.Fatalf("failed to start vault: %v", startErr) - } else if startErr != nil { - wait := time.Duration(rand.Int31n(2000)) * time.Millisecond - time.Sleep(wait) - continue - } + // Start the waiter + tv.waitCh = make(chan error, 1) + go func() { + err = tv.cmd.Wait() + tv.waitCh <- err + }() - waitErr := tv.waitForAPI() - if waitErr != nil && i == 0 { - t.Fatalf("failed to start vault: %v", waitErr) - } else if waitErr != nil { - wait := time.Duration(rand.Int31n(2000)) * time.Millisecond - time.Sleep(wait) - continue - } + // Ensure Vault started + var startErr error + select { + case startErr = <-tv.waitCh: + case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond): + } - return tv + if startErr != nil { + t.Fatalf("failed to start vault: %v", startErr) } - return nil + waitErr := tv.waitForAPI() + if waitErr != nil { + t.Fatalf("failed to start vault: %v", waitErr) + } + return tv } // NewTestVault returns a new TestVault instance that is ready for API calls @@ -141,7 +113,7 @@ func NewTestVault(t testing.T) *TestVault { // Start must be called and it is the callers responsibility to deal with any // port conflicts that may occur and retry accordingly. 
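The new testutil/tls.go file added above exposes two small assertions over generated TLS material. A usage sketch, not from the patch, with illustrative file paths:

package tlsexample

import (
	"testing"

	"github.com/hashicorp/nomad/testutil"
)

func TestGeneratedTLSMaterial(t *testing.T) {
	// Fails the test if ca.pem is missing or does not parse as a certificate.
	ca := testutil.IsValidCertificate(t, "ca.pem")
	if !ca.IsCA {
		t.Fatalf("expected ca.pem to be a CA certificate")
	}
	// Also checks that the key file is mode 0600 and parses as a signer.
	testutil.IsValidSigner(t, "ca-key.pem")
}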
func NewTestVaultDelayed(t testing.T) *TestVault { - port := freeport.MustTake(1)[0] + port := ci.PortAllocator.Grab(1)[0] token := uuid.Generate() bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port) http := fmt.Sprintf("http://127.0.0.1:%d", port) @@ -209,8 +181,6 @@ func (tv *TestVault) Start() error { // Stop stops the test Vault server func (tv *TestVault) Stop() { - defer freeport.Return(tv.ports) - if tv.cmd.Process == nil { return } diff --git a/vendor/github.com/hashicorp/nomad/testutil/wait.go b/vendor/github.com/hashicorp/nomad/testutil/wait.go index 5e3d87d9..006d2e21 100644 --- a/vendor/github.com/hashicorp/nomad/testutil/wait.go +++ b/vendor/github.com/hashicorp/nomad/testutil/wait.go @@ -6,8 +6,10 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/nomad/nomad/structs" "github.com/kr/pretty" + "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -135,7 +137,12 @@ func WaitForLeader(t testing.TB, rpc rpcFn) { // WaitForClient blocks until the client can be found func WaitForClient(t testing.TB, rpc rpcFn, nodeID string, region string) { + t.Helper() + WaitForClientStatus(t, rpc, nodeID, region, structs.NodeStatusReady) +} +// WaitForClientStatus blocks until the client is in the expected status. +func WaitForClientStatus(t testing.TB, rpc rpcFn, nodeID string, region string, status string) { t.Helper() if region == "" { @@ -155,12 +162,15 @@ func WaitForClient(t testing.TB, rpc rpcFn, nodeID string, region string) { if out.Node == nil { return false, fmt.Errorf("node not found") } - return out.Node.Status == structs.NodeStatusReady, nil + if out.Node.Status != status { + return false, fmt.Errorf("node is %s, not %s", out.Node.Status, status) + } + return true, nil }, func(err error) { - t.Fatalf("failed to find node: %v", err) + t.Fatalf("failed to wait for node staus: %v", err) }) - t.Logf("[TEST] Client for test %s ready, id: %s, region: %s", t.Name(), nodeID, region) + t.Logf("[TEST] Client for test %s %s, id: %s, region: %s", t.Name(), status, nodeID, region) } // WaitForVotingMembers blocks until autopilot promotes all server peers @@ -262,6 +272,53 @@ func WaitForRunning(t testing.TB, rpc rpcFn, job *structs.Job) []*structs.AllocL return WaitForRunningWithToken(t, rpc, job, "") } +// WaitforJobAllocStatus blocks until the ClientStatus of allocations for a job +// match the expected map of : . +func WaitForJobAllocStatus(t testing.TB, rpc rpcFn, job *structs.Job, allocStatus map[string]int) { + t.Helper() + WaitForJobAllocStatusWithToken(t, rpc, job, allocStatus, "") +} + +// WaitForJobAllocStatusWithToken behaves the same way as WaitForJobAllocStatus +// but is used for clusters with ACL enabled. 
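A sketch of how the new wait helpers above might be called from a test. It is not taken from the patch: the rpc parameter is assumed to match testutil's unexported rpcFn shape (func(string, interface{}, interface{}) error), and the job is assumed to be a single-group job that has already been registered.

package waitexample

import (
	"testing"

	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
)

// waitForBatchJobComplete blocks until every allocation of the job reports
// the "complete" client status.
func waitForBatchJobComplete(t *testing.T, rpc func(string, interface{}, interface{}) error, job *structs.Job) {
	testutil.WaitForJobAllocStatus(t, rpc, job, map[string]int{
		structs.AllocClientStatusComplete: job.TaskGroups[0].Count,
	})
}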
+func WaitForJobAllocStatusWithToken(t testing.TB, rpc rpcFn, job *structs.Job, allocStatus map[string]int, token string) { + t.Helper() + + WaitForResultRetries(2000*TestMultiplier(), func() (bool, error) { + args := &structs.JobSpecificRequest{ + JobID: job.ID, + QueryOptions: structs.QueryOptions{ + AuthToken: token, + Namespace: job.Namespace, + Region: job.Region, + }, + } + + var resp structs.JobAllocationsResponse + err := rpc("Job.Allocations", args, &resp) + if err != nil { + return false, fmt.Errorf("Job.Allocations error: %v", err) + } + + if len(resp.Allocations) == 0 { + evals := structs.JobEvaluationsResponse{} + require.NoError(t, rpc("Job.Evaluations", args, &evals), "error looking up evals") + return false, fmt.Errorf("0 allocations; evals: %s", pretty.Sprint(evals.Evaluations)) + } + + got := map[string]int{} + for _, alloc := range resp.Allocations { + got[alloc.ClientStatus]++ + } + if diff := cmp.Diff(allocStatus, got); diff != "" { + return false, fmt.Errorf("alloc status mismatch (-want +got):\n%s", diff) + } + return true, nil + }, func(err error) { + must.NoError(t, err) + }) +} + // WaitForFiles blocks until all the files in the slice are present func WaitForFiles(t testing.TB, files []string) { WaitForResult(func() (bool, error) { diff --git a/vendor/github.com/hashicorp/nomad/version/version.go b/vendor/github.com/hashicorp/nomad/version/version.go index 2739334b..ebe59466 100644 --- a/vendor/github.com/hashicorp/nomad/version/version.go +++ b/vendor/github.com/hashicorp/nomad/version/version.go @@ -11,7 +11,7 @@ var ( GitDescribe string // The main version number that is being run at the moment. - Version = "1.4.4" + Version = "1.4.6" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/vendor/github.com/hashicorp/vault/api/LICENSE b/vendor/github.com/hashicorp/vault/api/LICENSE index e87a115e..f4f97ee5 100644 --- a/vendor/github.com/hashicorp/vault/api/LICENSE +++ b/vendor/github.com/hashicorp/vault/api/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2015 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. 
Definitions diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go index 747b9bc1..d2e5bb5e 100644 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -65,23 +65,7 @@ func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - r := c.c.NewRequest(http.MethodGet, "/v1/"+path) - - var values url.Values - for k, v := range data { - if values == nil { - values = make(url.Values) - } - for _, val := range v { - values.Add(k, val) - } - } - - if values != nil { - r.Params = values - } - - resp, err := c.c.rawRequestWithContext(ctx, r) + resp, err := c.readRawWithDataWithContext(ctx, path, data) if resp != nil { defer resp.Body.Close() } @@ -106,6 +90,41 @@ func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data return ParseSecret(resp.Body) } +func (c *Logical) ReadRaw(path string) (*Response, error) { + return c.ReadRawWithData(path, nil) +} + +func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { + return c.ReadRawWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + return c.readRawWithDataWithContext(ctx, path, data) +} + +func (c *Logical) readRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + return c.c.RawRequestWithContext(ctx, r) +} + func (c *Logical) List(path string) (*Secret, error) { return c.ListWithContext(context.Background(), path) } diff --git a/vendor/github.com/hashicorp/vault/sdk/LICENSE b/vendor/github.com/hashicorp/vault/sdk/LICENSE index e87a115e..f4f97ee5 100644 --- a/vendor/github.com/hashicorp/vault/sdk/LICENSE +++ b/vendor/github.com/hashicorp/vault/sdk/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2015 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go index 348c85f9..58ebc06f 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go @@ -64,6 +64,20 @@ var SignatureAlgorithmNames = map[string]x509.SignatureAlgorithm{ "ed25519": x509.PureEd25519, // Duplicated for clarity; most won't expect the "Pure" prefix. } +// Mapping of constant values<->constant names for SignatureAlgorithm +var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ + x509.SHA256WithRSA: "SHA256WithRSA", + x509.SHA384WithRSA: "SHA384WithRSA", + x509.SHA512WithRSA: "SHA512WithRSA", + x509.ECDSAWithSHA256: "ECDSAWithSHA256", + x509.ECDSAWithSHA384: "ECDSAWithSHA384", + x509.ECDSAWithSHA512: "ECDSAWithSHA512", + x509.SHA256WithRSAPSS: "SHA256WithRSAPSS", + x509.SHA384WithRSAPSS: "SHA384WithRSAPSS", + x509.SHA512WithRSAPSS: "SHA512WithRSAPSS", + x509.PureEd25519: "Ed25519", +} + // OID for RFC 5280 Delta CRL Indicator CRL extension. 
// // > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } @@ -789,7 +803,7 @@ func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reade return createCertificate(data, randReader, keyGenerator) } -// Set correct correct RSA sig algo +// Set correct RSA sig algo func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle) { if data.Params.UsePSS { switch data.Params.SignatureBits { @@ -812,6 +826,35 @@ func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle } } +// selectSignatureAlgorithmForRSA returns the proper x509.SignatureAlgorithm based on various properties set in the +// Creation Bundle parameter. This method will default to a SHA256 signature algorithm if the requested signature +// bits is not set/unknown. +func selectSignatureAlgorithmForRSA(data *CreationBundle) x509.SignatureAlgorithm { + if data.Params.UsePSS { + switch data.Params.SignatureBits { + case 256: + return x509.SHA256WithRSAPSS + case 384: + return x509.SHA384WithRSAPSS + case 512: + return x509.SHA512WithRSAPSS + default: + return x509.SHA256WithRSAPSS + } + } + + switch data.Params.SignatureBits { + case 256: + return x509.SHA256WithRSA + case 384: + return x509.SHA384WithRSA + case 512: + return x509.SHA512WithRSA + default: + return x509.SHA256WithRSA + } +} + func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) { var err error result := &ParsedCertBundle{} @@ -878,7 +921,11 @@ func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGen var certBytes []byte if data.SigningBundle != nil { - switch data.SigningBundle.PrivateKeyType { + privateKeyType := data.SigningBundle.PrivateKeyType + if privateKeyType == ManagedPrivateKey { + privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) + } + switch privateKeyType { case RSAPrivateKey: certTemplateSetSigAlgo(certTemplate, data) case Ed25519PrivateKey: @@ -986,7 +1033,10 @@ func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x } } -var oidExtensionBasicConstraints = []int{2, 5, 29, 19} +var ( + oidExtensionBasicConstraints = []int{2, 5, 29, 19} + oidExtensionSubjectAltName = []int{2, 5, 29, 17} +) // CreateCSR creates a CSR with the default rand.Reader to // generate a cert/keypair. 
This is currently only meant @@ -1049,9 +1099,10 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea switch data.Params.KeyType { case "rsa": - csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA + // use specified RSA algorithm defaulting to the appropriate SHA256 RSA signature type + csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForRSA(data) case "ec": - csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits) case "ed25519": csrTemplate.SignatureAlgorithm = x509.PureEd25519 } @@ -1067,6 +1118,10 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)} } + if err = result.CSR.CheckSignature(); err != nil { + return nil, errors.New("failed signature validation for CSR") + } + return result, nil } @@ -1127,7 +1182,12 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) } - switch data.SigningBundle.PrivateKeyType { + privateKeyType := data.SigningBundle.PrivateKeyType + if privateKeyType == ManagedPrivateKey { + privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) + } + + switch privateKeyType { case RSAPrivateKey: certTemplateSetSigAlgo(certTemplate, data) case ECPrivateKey: @@ -1151,7 +1211,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun certTemplate.URIs = data.CSR.URIs for _, name := range data.CSR.Extensions { - if !name.Id.Equal(oidExtensionBasicConstraints) { + if !name.Id.Equal(oidExtensionBasicConstraints) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(oidExtensionSubjectAltName)) { certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) } } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go index 03aba849..15b816f0 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go @@ -148,16 +148,16 @@ type KeyBundle struct { } func GetPrivateKeyTypeFromSigner(signer crypto.Signer) PrivateKeyType { - switch signer.(type) { - case *rsa.PrivateKey: + // We look at the public key types to work-around limitations/typing of managed keys. 
+ switch signer.Public().(type) { + case *rsa.PublicKey: return RSAPrivateKey - case *ecdsa.PrivateKey: + case *ecdsa.PublicKey: return ECPrivateKey - case ed25519.PrivateKey: + case ed25519.PublicKey: return Ed25519PrivateKey - default: - return UnknownPrivateKey } + return UnknownPrivateKey } // ToPEMBundle converts a string-based certificate bundle diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go index 9ebc7838..41316ec4 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go @@ -2,6 +2,7 @@ package pluginutil import ( "context" + "errors" "fmt" "os" "strings" @@ -13,6 +14,8 @@ import ( "google.golang.org/grpc/status" ) +var ErrNoMultiplexingIDFound = errors.New("no multiplexing ID found") + type PluginMultiplexingServerImpl struct { UnimplementedPluginMultiplexingServer @@ -62,7 +65,9 @@ func GetMultiplexIDFromContext(ctx context.Context) (string, error) { } multiplexIDs := md[MultiplexingCtxKey] - if len(multiplexIDs) != 1 { + if len(multiplexIDs) == 0 { + return "", ErrNoMultiplexingIDFound + } else if len(multiplexIDs) != 1 { return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs)) } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go index d7073b10..96963af3 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc v3.21.7 // source: sdk/helper/pluginutil/multiplexing.proto package pluginutil diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/error.go b/vendor/github.com/hashicorp/vault/sdk/logical/error.go index 02f68dd9..68c8e137 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/error.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/error.go @@ -17,6 +17,11 @@ var ( // ErrPermissionDenied is returned if the client is not authorized ErrPermissionDenied = errors.New("permission denied") + // ErrInvalidCredentials is returned when the provided credentials are incorrect + // This is used internally for user lockout purposes. This is not seen externally. + // The status code returned does not change because of this error + ErrInvalidCredentials = errors.New("invalid credentials") + // ErrMultiAuthzPending is returned if the the request needs more // authorizations ErrMultiAuthzPending = errors.New("request needs further approval") diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go index 6c1c4b2c..18af6e68 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc v3.21.7 // source: sdk/logical/identity.proto package logical diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go index 03be5d3c..9be723e1 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc v3.21.7 // source: sdk/logical/plugin.proto package logical diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response.go b/vendor/github.com/hashicorp/vault/sdk/logical/response.go index 19194f52..0f8a2210 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/response.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/response.go @@ -92,7 +92,8 @@ func (r *Response) AddWarning(warning string) { // IsError returns true if this response seems to indicate an error. func (r *Response) IsError() bool { - return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil + // If the response data contains only an 'error' element, or an 'error' and a 'data' element only + return r != nil && r.Data != nil && r.Data["error"] != nil && (len(r.Data) == 1 || (r.Data["data"] != nil && len(r.Data) == 2)) } func (r *Response) Error() error { diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go index 7454189f..4a9f61d5 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go @@ -122,6 +122,8 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { statusCode = http.StatusNotFound case errwrap.Contains(err, ErrRelativePath.Error()): statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrInvalidCredentials.Error()): + statusCode = http.StatusBadRequest } } @@ -180,3 +182,23 @@ func RespondError(w http.ResponseWriter, status int, err error) { enc := json.NewEncoder(w) enc.Encode(resp) } + +func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) { + AdjustErrorStatusCode(&status, err) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + + type ErrorAndDataResponse struct { + Errors []string `json:"errors"` + Data interface{} `json:"data""` + } + resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)} + if err != nil { + resp.Errors = append(resp.Errors, err.Error()) + } + resp.Data = data + + enc := json.NewEncoder(w) + enc.Encode(resp) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go index 7845aeaf..415970f1 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
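To make the revised IsError contract concrete: a response now counts as an error when its Data carries only "error", or exactly "error" plus "data" (the shape RespondErrorAndData emits). A self-contained sketch of that predicate; the response type here is a stand-in, not the SDK's:

package main

import "fmt"

type response struct {
	Data map[string]interface{}
}

// isError mirrors the updated check: "error" alone, or "error" together with
// a "data" payload and nothing else.
func (r *response) isError() bool {
	if r == nil || r.Data == nil || r.Data["error"] == nil {
		return false
	}
	return len(r.Data) == 1 || (r.Data["data"] != nil && len(r.Data) == 2)
}

func main() {
	onlyErr := &response{Data: map[string]interface{}{"error": "nope"}}
	errAndData := &response{Data: map[string]interface{}{"error": "nope", "data": map[string]int{"remaining": 2}}}
	normal := &response{Data: map[string]interface{}{"value": 42}}

	fmt.Println(onlyErr.isError(), errAndData.isError(), normal.isError()) // true true false
}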
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc v3.21.7 // source: sdk/logical/version.proto package logical diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go index ffac3318..af40f538 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go @@ -29,7 +29,6 @@ var cacheExceptionsPaths = []string{ "sys/expire/", "core/poison-pill", "core/raft/tls", - "core/license", } // CacheRefreshContext returns a context with an added value denoting if the diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go index ebbc418e..e45626e2 100644 --- a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go +++ b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go @@ -11,7 +11,7 @@ var ( // Whether cgo is enabled or not; set at build time CgoEnabled bool - Version = "1.12.0" + Version = "1.13.0" VersionPrerelease = "dev1" VersionMetadata = "" ) diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go index 07c4a471..4e4c2945 100644 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go +++ b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go @@ -1,3 +1,4 @@ +//go:build go1.12 // +build go1.12 package fmtsort @@ -7,12 +8,16 @@ import "reflect" const brokenNaNs = false func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { - key := make([]reflect.Value, mapValue.Len()) - value := make([]reflect.Value, len(key)) + // Note: this code is arranged to not panic even in the presence + // of a concurrent map update. The runtime is responsible for + // yelling loudly if that happens. See issue 33275. + n := mapValue.Len() + key := make([]reflect.Value, 0, n) + value := make([]reflect.Value, 0, n) iter := mapValue.MapRange() - for i := 0; iter.Next(); i++ { - key[i] = iter.Key() - value[i] = iter.Value() + for iter.Next() { + key = append(key, iter.Key()) + value = append(value, iter.Value()) } return key, value } diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go index 8c28451a..873bf7f5 100644 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go +++ b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go @@ -1,3 +1,4 @@ +//go:build !go1.12 // +build !go1.12 package fmtsort @@ -8,8 +9,8 @@ const brokenNaNs = true func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { key := mapValue.MapKeys() - value := make([]reflect.Value, len(key)) - for i, k := range key { + value := make([]reflect.Value, 0, len(key)) + for _, k := range key { v := mapValue.MapIndex(k) if !v.IsValid() { // Note: we can't retrieve the value, probably because @@ -17,7 +18,7 @@ func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { // add a zero value of the correct type in that case. 
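The fmtsort change above swaps indexed writes for append so that a map shrinking under a concurrent writer cannot push the iterator past the pre-sized slices. A compact sketch of the same pattern over reflect's map iterator:

package main

import (
	"fmt"
	"reflect"
)

// mapElems collects keys and values without assuming the map's length stays
// stable: the length is only a capacity hint, and only elements actually
// yielded by the iterator are appended.
func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) {
	n := mapValue.Len()
	keys := make([]reflect.Value, 0, n)
	vals := make([]reflect.Value, 0, n)
	iter := mapValue.MapRange()
	for iter.Next() {
		keys = append(keys, iter.Key())
		vals = append(vals, iter.Value())
	}
	return keys, vals
}

func main() {
	m := map[string]int{"a": 1, "b": 2}
	k, v := mapElems(reflect.ValueOf(m))
	fmt.Println(len(k), len(v)) // 2 2
}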
v = reflect.Zero(mapValue.Type().Elem()) } - value[i] = v + value = append(value, v) } return key, value } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go index 4f26230d..a60e462d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go @@ -17,6 +17,71 @@ import ( var ClocksPerSec = float64(100) +var armModelToModelName = map[uint64]string{ + 0x810: "ARM810", + 0x920: "ARM920", + 0x922: "ARM922", + 0x926: "ARM926", + 0x940: "ARM940", + 0x946: "ARM946", + 0x966: "ARM966", + 0xa20: "ARM1020", + 0xa22: "ARM1022", + 0xa26: "ARM1026", + 0xb02: "ARM11 MPCore", + 0xb36: "ARM1136", + 0xb56: "ARM1156", + 0xb76: "ARM1176", + 0xc05: "Cortex-A5", + 0xc07: "Cortex-A7", + 0xc08: "Cortex-A8", + 0xc09: "Cortex-A9", + 0xc0d: "Cortex-A17", + 0xc0f: "Cortex-A15", + 0xc0e: "Cortex-A17", + 0xc14: "Cortex-R4", + 0xc15: "Cortex-R5", + 0xc17: "Cortex-R7", + 0xc18: "Cortex-R8", + 0xc20: "Cortex-M0", + 0xc21: "Cortex-M1", + 0xc23: "Cortex-M3", + 0xc24: "Cortex-M4", + 0xc27: "Cortex-M7", + 0xc60: "Cortex-M0+", + 0xd01: "Cortex-A32", + 0xd02: "Cortex-A34", + 0xd03: "Cortex-A53", + 0xd04: "Cortex-A35", + 0xd05: "Cortex-A55", + 0xd06: "Cortex-A65", + 0xd07: "Cortex-A57", + 0xd08: "Cortex-A72", + 0xd09: "Cortex-A73", + 0xd0a: "Cortex-A75", + 0xd0b: "Cortex-A76", + 0xd0c: "Neoverse-N1", + 0xd0d: "Cortex-A77", + 0xd0e: "Cortex-A76AE", + 0xd13: "Cortex-R52", + 0xd20: "Cortex-M23", + 0xd21: "Cortex-M33", + 0xd40: "Neoverse-V1", + 0xd41: "Cortex-A78", + 0xd42: "Cortex-A78AE", + 0xd43: "Cortex-A65AE", + 0xd44: "Cortex-X1", + 0xd46: "Cortex-A510", + 0xd47: "Cortex-A710", + 0xd48: "Cortex-X2", + 0xd49: "Neoverse-N2", + 0xd4a: "Neoverse-E1", + 0xd4b: "Cortex-A78C", + 0xd4c: "Cortex-X1C", + 0xd4d: "Cortex-A715", + 0xd4e: "Cortex-X3", +} + func init() { clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) // ignore errors @@ -177,6 +242,17 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { c.Family = value case "model", "CPU part": c.Model = value + // if CPU is arm based, model name is found via model number. 
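The cpuinfo lookup that follows maps the hexadecimal "CPU part" field to a marketing name; parsing with base 0 lets strconv accept the 0x prefix as printed by the kernel. An isolated sketch with a couple of entries (the full table is the map added above):

package main

import (
	"fmt"
	"strconv"
)

// A tiny excerpt of the ARM part-number table shown above.
var armParts = map[uint64]string{
	0xd08: "Cortex-A72",
	0xd0c: "Neoverse-N1",
}

func modelName(cpuPart string) string {
	// Base 0 accepts the "0x..." form found in /proc/cpuinfo.
	v, err := strconv.ParseUint(cpuPart, 0, 16)
	if err != nil {
		return "Undefined"
	}
	if name, ok := armParts[v]; ok {
		return name
	}
	return "Undefined"
}

func main() {
	fmt.Println(modelName("0xd08"), modelName("0xfff")) // Cortex-A72 Undefined
}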
refer to: arch/arm64/kernel/cpuinfo.c + if c.VendorID == "ARM" { + if v, err := strconv.ParseUint(c.Model, 0, 16); err == nil { + modelName, exist := armModelToModelName[v] + if exist { + c.ModelName = modelName + } else { + c.ModelName = "Undefined" + } + } + } case "model name", "cpu": c.ModelName = value if strings.Contains(value, "POWER8") || diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go index d1a0e4cd..e10612fd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go @@ -14,8 +14,7 @@ import ( ) var ( - procGetActiveProcessorCount = common.Modkernel32.NewProc("GetActiveProcessorCount") - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") ) type win32_Processor struct { @@ -204,15 +203,12 @@ type systemInfo struct { func CountsWithContext(ctx context.Context, logical bool) (int, error) { if logical { // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 - err := procGetActiveProcessorCount.Find() - if err == nil { // Win7+ - ret, _, _ := procGetActiveProcessorCount.Call(uintptr(0xffff)) // ALL_PROCESSOR_GROUPS is 0xffff according to Rust's winapi lib https://docs.rs/winapi/*/x86_64-pc-windows-msvc/src/winapi/shared/ntdef.rs.html#120 - if ret != 0 { - return int(ret), nil - } + ret := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS) + if ret != 0 { + return int(ret), nil } var systemInfo systemInfo - _, _, err = procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + _, _, err := procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) if systemInfo.dwNumberOfProcessors == 0 { return 0, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go index eb25cbda..190a2d90 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go @@ -5,14 +5,78 @@ package disk import ( "context" + "regexp" + "strings" + "golang.org/x/sys/unix" "github.com/shirou/gopsutil/v3/internal/common" ) +var whiteSpaces = regexp.MustCompile(`\s+`) +var startBlank = regexp.MustCompile(`^\s+`) + +var ignoreFSType = map[string]bool{"procfs": true} +var FSType = map[int]string{ + 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc", + 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs", + 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs", + 39: "ahafs", 40: "sterm-nfs", 41: "asmfs", + } + func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { - return []PartitionStat{}, common.ErrNotImplementedError + var ret []PartitionStat + + out, err := invoke.CommandWithContext(ctx, "mount") + if err != nil { + return nil, err + } + + // parse head lines for column names + colidx := make(map[string]int) + lines := strings.Split(string(out), "\n") + if len(lines) < 3 { + return nil, common.ErrNotImplementedError + } + + idx := 0 + start := 0 + finished := false + for pos, ch := range lines[1] { + if ch == ' ' && ! 
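The Windows CPU-count hunk above replaces a hand-rolled NewProc lookup with the typed wrapper from golang.org/x/sys/windows. A minimal, Windows-only sketch of that call, build-tagged so it only compiles there; the fallback to runtime.NumCPU is an illustrative simplification rather than gopsutil's actual fallback path:

//go:build windows

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/windows"
)

func main() {
	// Count logical processors across all processor groups (Windows 7+).
	n := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS)
	if n == 0 {
		// Simplified fallback if the call reports nothing.
		fmt.Println("fallback:", runtime.NumCPU())
		return
	}
	fmt.Println("logical CPUs:", n)
}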
finished { + name := strings.TrimSpace(lines[0][start:pos]) + colidx[name] = idx + finished = true + } else if ch == '-' && finished { + idx++ + start = pos + finished = false + } + } + name := strings.TrimSpace(lines[0][start:len(lines[1])]) + colidx[name] = idx + + for idx := 2; idx < len(lines); idx++ { + line := lines[idx] + if startBlank.MatchString(line) { + line = "localhost" + line + } + p := whiteSpaces.Split(lines[idx], 6) + if len(p) < 5 || ignoreFSType[p[colidx["vfs"]]] { + continue + } + d := PartitionStat{ + Device: p[colidx["mounted"]], + Mountpoint: p[colidx["mounted over"]], + Fstype: p[colidx["vfs"]], + Opts: strings.Split(p[colidx["options"]], ","), + } + + ret = append(ret, d) + } + + return ret, nil } -func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { - return nil, common.ErrNotImplementedError +func getFsType(stat unix.Statfs_t) string { + return FSType[int(stat.Vfstype)] } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go index 0877b761..933cb045 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go @@ -20,9 +20,15 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro return ret, err } fs := make([]unix.Statfs_t, count) - if _, err = unix.Getfsstat(fs, unix.MNT_WAIT); err != nil { + count, err = unix.Getfsstat(fs, unix.MNT_WAIT) + if err != nil { return ret, err } + // On 10.14, and possibly other OS versions, the actual count may + // be less than from the first call. Truncate to the returned count + // to prevent accessing uninitialized entries. + // https://github.com/shirou/gopsutil/issues/1390 + fs = fs[:count] for _, stat := range fs { opts := []string{"rw"} if stat.Flags&unix.MNT_RDONLY != 0 { diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go index b041c8d7..27c24c92 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go @@ -1,5 +1,5 @@ -//go:build darwin && cgo -// +build darwin,cgo +//go:build darwin && cgo && !ios +// +build darwin,cgo,!ios package disk diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go index 99bb8ba2..1f099b7c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go @@ -1,5 +1,5 @@ -//go:build darwin && !cgo -// +build darwin,!cgo +//go:build (darwin && !cgo) || ios +// +build darwin,!cgo ios package disk diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go index 9c4a798d..934d651f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go @@ -10,6 +10,10 @@ import ( "fmt" "math" "os" + "path/filepath" + "regexp" + "runtime" + "strconv" "strings" "github.com/shirou/gopsutil/v3/internal/common" @@ -73,20 +77,129 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro }) } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("unable to scan %q: %v", _MNTTAB, err) + return nil, fmt.Errorf("unable to scan %q: %w", _MNTTAB, err) } return ret, err } func IOCountersWithContext(ctx context.Context, 
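The darwin fix above guards against Getfsstat returning fewer entries on the second call than the first; the idiom is size, allocate, refill, then truncate to the count actually returned. A darwin-only sketch of that pattern:

//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A nil buffer just reports how many filesystems exist.
	count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
	if err != nil {
		panic(err)
	}
	fs := make([]unix.Statfs_t, count)

	// The second call may legitimately return fewer entries than the first,
	// so truncate instead of trusting len(fs).
	count, err = unix.Getfsstat(fs, unix.MNT_WAIT)
	if err != nil {
		panic(err)
	}
	fs = fs[:count]
	fmt.Println("mounted filesystems:", len(fs))
}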
names ...string) (map[string]IOCountersStat, error) { - return nil, common.ErrNotImplementedError + var issolaris bool + if runtime.GOOS == "illumos" { + issolaris = false + } else { + issolaris = true + } + // check disks instead of zfs pools + filterstr := "/[^zfs]/:::/^nread$|^nwritten$|^reads$|^writes$|^rtime$|^wtime$/" + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "disk", "-p", filterstr) + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %w", err) + } + lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("no disk class found") + } + dnamearr := make(map[string]string) + nreadarr := make(map[string]uint64) + nwrittenarr := make(map[string]uint64) + readsarr := make(map[string]uint64) + writesarr := make(map[string]uint64) + rtimearr := make(map[string]uint64) + wtimearr := make(map[string]uint64) + re := regexp.MustCompile(`[:\s]+`) + + // in case the name is "/dev/sda1", then convert to "sda1" + for i, name := range names { + names[i] = filepath.Base(name) + } + + for _, line := range lines { + fields := re.Split(line, -1) + if len(fields) == 0 { + continue + } + moduleName := fields[0] + instance := fields[1] + dname := fields[2] + + if len(names) > 0 && !common.StringsHas(names, dname) { + continue + } + dnamearr[moduleName+instance] = dname + // fields[3] is the statistic label, fields[4] is the value + switch fields[3] { + case "nread": + nreadarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "nwritten": + nwrittenarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "reads": + readsarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "writes": + writesarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "rtime": + if issolaris { + // from sec to milli secs + var frtime float64 + frtime, err = strconv.ParseFloat((fields[4]), 64) + rtimearr[moduleName+instance] = uint64(frtime * 1000) + } else { + // from nano to milli secs + rtimearr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + rtimearr[moduleName+instance] = rtimearr[moduleName+instance] / 1000 / 1000 + } + if err != nil { + return nil, err + } + case "wtime": + if issolaris { + // from sec to milli secs + var fwtime float64 + fwtime, err = strconv.ParseFloat((fields[4]), 64) + wtimearr[moduleName+instance] = uint64(fwtime * 1000) + } else { + // from nano to milli secs + wtimearr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + wtimearr[moduleName+instance] = wtimearr[moduleName+instance] / 1000 / 1000 + } + if err != nil { + return nil, err + } + } + } + + ret := make(map[string]IOCountersStat, 0) + for k := range dnamearr { + d := IOCountersStat{ + Name: dnamearr[k], + ReadBytes: nreadarr[k], + WriteBytes: nwrittenarr[k], + ReadCount: readsarr[k], + WriteCount: writesarr[k], + ReadTime: rtimearr[k], + WriteTime: wtimearr[k], + } + ret[d.Name] = d + } + return ret, nil } func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { statvfs := unix.Statvfs_t{} if err := unix.Statvfs(path, &statvfs); err != nil { - return nil, fmt.Errorf("unable to call statvfs(2) on %q: %v", path, err) + return nil, fmt.Errorf("unable to call statvfs(2) on %q: %w", path, err) } usageStat := &UsageStat{ diff --git 
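Both the Solaris disk collector above and the net collector later in this patch shell out to kstat -p, whose lines have the shape module:instance:name:statistic followed by the value; one regexp over colons and whitespace splits that into five fields. A trimmed-down parser sketch over canned output (the sample lines are invented, only the field layout mirrors the code):

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var kstatSep = regexp.MustCompile(`[:\s]+`)

func main() {
	sample := "sd:0:sd0:nread\t123456\nsd:0:sd0:nwritten\t654321"

	stats := map[string]uint64{}
	for _, line := range strings.Split(sample, "\n") {
		fields := kstatSep.Split(line, -1)
		if len(fields) < 5 {
			continue
		}
		v, err := strconv.ParseUint(fields[4], 10, 64)
		if err != nil {
			continue
		}
		// Key by module+instance plus the statistic name, as the collector does.
		stats[fields[0]+fields[1]+":"+fields[3]] = v
	}
	fmt.Println(stats)
}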
a/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go index bdb62b24..1e735240 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go @@ -1,5 +1,5 @@ -//go:build freebsd || linux || darwin -// +build freebsd linux darwin +//go:build freebsd || linux || darwin || (aix && !cgo) +// +build freebsd linux darwin aix,!cgo package disk @@ -27,13 +27,22 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { InodesFree: (uint64(stat.Ffree)), } + ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) + + if (ret.Used + ret.Free) == 0 { + ret.UsedPercent = 0 + } else { + // We don't use ret.Total to calculate percent. + // see https://github.com/shirou/gopsutil/issues/562 + ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0 + } + // if could not get InodesTotal, return empty if ret.InodesTotal < ret.InodesFree { return ret, nil } ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) - ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) if ret.InodesTotal == 0 { ret.InodesUsedPercent = 0 @@ -41,14 +50,6 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 } - if (ret.Used + ret.Free) == 0 { - ret.UsedPercent = 0 - } else { - // We don't use ret.Total to calculate percent. - // see https://github.com/shirou/gopsutil/issues/562 - ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0 - } - return ret, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go index 5fb9b5b4..5dfd1ca9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go @@ -15,6 +15,8 @@ import ( "golang.org/x/sys/windows/registry" ) +type Warnings = common.Warnings + var ( procGetDiskFreeSpaceExW = common.Modkernel32.NewProc("GetDiskFreeSpaceExW") procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW") @@ -79,66 +81,93 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { return ret, nil } +// PartitionsWithContext returns disk partitions. +// Since GetVolumeInformation doesn't have a timeout, this method uses context to set deadline by users. 
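The disk_unix change above computes UsedPercent from Used/(Used+Free) rather than from Total (root-reserved blocks make Total misleading, see gopsutil issue 562) and does so before the inode early-return so the field is always populated. A tiny sketch of that calculation:

package main

import "fmt"

// usedPercent deliberately ignores Total: reserved blocks mean Used+Free is
// the space a caller can actually reason about.
func usedPercent(used, free uint64) float64 {
	if used+free == 0 {
		return 0
	}
	return float64(used) / float64(used+free) * 100.0
}

func main() {
	fmt.Printf("%.1f%%\n", usedPercent(75, 25)) // 75.0%
}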
func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + warnings := Warnings{ + Verbose: true, + } var ret []PartitionStat + retChan := make(chan []PartitionStat) + errChan := make(chan error) + defer close(retChan) + defer close(errChan) + lpBuffer := make([]byte, 254) - diskret, _, err := procGetLogicalDriveStringsW.Call( - uintptr(len(lpBuffer)), - uintptr(unsafe.Pointer(&lpBuffer[0]))) - if diskret == 0 { - return ret, err - } - for _, v := range lpBuffer { - if v >= 65 && v <= 90 { - path := string(v) + ":" - typepath, _ := windows.UTF16PtrFromString(path) - typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath))) - if typeret == 0 { - return ret, windows.GetLastError() - } - // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM - - if typeret == 2 || typeret == 3 || typeret == 4 || typeret == 5 { - lpVolumeNameBuffer := make([]byte, 256) - lpVolumeSerialNumber := int64(0) - lpMaximumComponentLength := int64(0) - lpFileSystemFlags := int64(0) - lpFileSystemNameBuffer := make([]byte, 256) - volpath, _ := windows.UTF16PtrFromString(string(v) + ":/") - driveret, _, err := procGetVolumeInformation.Call( - uintptr(unsafe.Pointer(volpath)), - uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])), - uintptr(len(lpVolumeNameBuffer)), - uintptr(unsafe.Pointer(&lpVolumeSerialNumber)), - uintptr(unsafe.Pointer(&lpMaximumComponentLength)), - uintptr(unsafe.Pointer(&lpFileSystemFlags)), - uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])), - uintptr(len(lpFileSystemNameBuffer))) - if driveret == 0 { - if typeret == 5 || typeret == 2 { - continue // device is not ready will happen if there is no disk in the drive - } - return ret, err - } - opts := []string{"rw"} - if lpFileSystemFlags&fileReadOnlyVolume != 0 { - opts = []string{"ro"} - } - if lpFileSystemFlags&fileFileCompression != 0 { - opts = append(opts, "compress") + + f := func() { + diskret, _, err := procGetLogicalDriveStringsW.Call( + uintptr(len(lpBuffer)), + uintptr(unsafe.Pointer(&lpBuffer[0]))) + if diskret == 0 { + errChan <- err + return + } + for _, v := range lpBuffer { + if v >= 65 && v <= 90 { + path := string(v) + ":" + typepath, _ := windows.UTF16PtrFromString(path) + typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath))) + if typeret == 0 { + err := windows.GetLastError() + warnings.Add(err) + continue } + // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM + + if typeret == 2 || typeret == 3 || typeret == 4 || typeret == 5 { + lpVolumeNameBuffer := make([]byte, 256) + lpVolumeSerialNumber := int64(0) + lpMaximumComponentLength := int64(0) + lpFileSystemFlags := int64(0) + lpFileSystemNameBuffer := make([]byte, 256) + volpath, _ := windows.UTF16PtrFromString(string(v) + ":/") + driveret, _, err := procGetVolumeInformation.Call( + uintptr(unsafe.Pointer(volpath)), + uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])), + uintptr(len(lpVolumeNameBuffer)), + uintptr(unsafe.Pointer(&lpVolumeSerialNumber)), + uintptr(unsafe.Pointer(&lpMaximumComponentLength)), + uintptr(unsafe.Pointer(&lpFileSystemFlags)), + uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])), + uintptr(len(lpFileSystemNameBuffer))) + if driveret == 0 { + if typeret == 5 || typeret == 2 { + continue // device is not ready will happen if there is no disk in the drive + } + warnings.Add(err) + continue + } + opts := []string{"rw"} + if lpFileSystemFlags&fileReadOnlyVolume != 0 { + opts = []string{"ro"} + } + if lpFileSystemFlags&fileFileCompression != 0 { + opts = append(opts, 
"compress") + } - d := PartitionStat{ - Mountpoint: path, - Device: path, - Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)), - Opts: opts, + d := PartitionStat{ + Mountpoint: path, + Device: path, + Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)), + Opts: opts, + } + ret = append(ret, d) } - ret = append(ret, d) } } + retChan <- ret + } + + go f() + select { + case err := <-errChan: + return ret, err + case ret := <-retChan: + return ret, warnings.Reference() + case <-ctx.Done(): + return ret, ctx.Err() } - return ret, nil } func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host.go b/vendor/github.com/shirou/gopsutil/v3/host/host.go index 7c53e208..f363eed8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host.go @@ -155,3 +155,7 @@ func SensorsTemperatures() ([]TemperatureStat, error) { func timeSince(ts uint64) uint64 { return uint64(time.Now().Unix()) - ts } + +func timeSinceMillis(ts uint64) uint64 { + return uint64(time.Now().UnixMilli()) - ts +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go index 940415c9..5c07c697 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go @@ -19,6 +19,8 @@ import ( "golang.org/x/sys/unix" ) +type Warnings = common.Warnings + type lsbStruct struct { ID string Release string @@ -317,7 +319,7 @@ func KernelVersionWithContext(ctx context.Context) (version string, err error) { if err != nil { return "", err } - return string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), nil + return unix.ByteSliceToString(utsname.Release[:]), nil } func getSlackwareVersion(contents []string) string { diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go new file mode 100644 index 00000000..5b324eff --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go @@ -0,0 +1,48 @@ +//go:build linux && ppc64 +// +build linux,ppc64 + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} + +type exit_status struct { + Termination int16 + Exit int16 +} + +type timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go index 89e63781..24529f19 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go @@ -3,14 +3,13 @@ package host -import ( - "bytes" - - "golang.org/x/sys/unix" -) +import "golang.org/x/sys/unix" func KernelArch() (string, error) { var utsname unix.Utsname err := unix.Uname(&utsname) - return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), err + if err != nil { + return "", err + } + return 
unix.ByteSliceToString(utsname.Machine[:]), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go b/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go index fcd1d590..1fe0551b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go @@ -103,6 +103,14 @@ func numProcs(ctx context.Context) (uint64, error) { } func UptimeWithContext(ctx context.Context) (uint64, error) { + up, err := uptimeMillis() + if err != nil { + return 0, err + } + return uint64((time.Duration(up) * time.Millisecond).Seconds()), nil +} + +func uptimeMillis() (uint64, error) { procGetTickCount := procGetTickCount64 err := procGetTickCount64.Find() if err != nil { @@ -112,7 +120,7 @@ func UptimeWithContext(ctx context.Context) (uint64, error) { if lastErr != 0 { return 0, lastErr } - return uint64((time.Duration(r1) * time.Millisecond).Seconds()), nil + return uint64(r1), nil } // cachedBootTime must be accessed via atomic.Load/StoreUint64 @@ -123,11 +131,11 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { if t != 0 { return t, nil } - up, err := Uptime() + up, err := uptimeMillis() if err != nil { return 0, err } - t = timeSince(up) + t = uint64((time.Duration(timeSinceMillis(up)) * time.Millisecond).Seconds()) atomic.StoreUint64(&cachedBootTime, t) return t, nil } @@ -188,6 +196,14 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil } } + var UBR uint32 // Update Build Revision + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`UBR`), nil, &valType, nil, &bufLen) + if err == nil { + regBuf := make([]byte, 4) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`UBR`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + copy((*[4]byte)(unsafe.Pointer(&UBR))[:], regBuf) + } + // PlatformFamily switch osInfo.wProductType { case 1: @@ -199,7 +215,9 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil } // Platform Version - version = fmt.Sprintf("%d.%d.%d Build %d", osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, osInfo.dwBuildNumber) + version = fmt.Sprintf("%d.%d.%d.%d Build %d.%d", + osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, UBR, + osInfo.dwBuildNumber, UBR) return platform, family, version, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index adc4922b..323402dc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -114,8 +114,8 @@ func ReadLines(filename string) ([]string, error) { // ReadLinesOffsetN reads contents from file and splits them by new line. // The offset tells at which line number to start. // The count determines the number of lines to read (starting from offset): -// n >= 0: at most n lines -// n < 0: whole file +// n >= 0: at most n lines +// n < 0: whole file func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { f, err := os.Open(filename) if err != nil { @@ -364,14 +364,8 @@ func HostDev(combineWith ...string) string { return GetEnv("HOST_DEV", "/dev", combineWith...) } -// MockEnv set environment variable and return revert function. -// MockEnv should be used testing only. 
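The Windows host changes above keep uptime in milliseconds internally and only convert to seconds at the edge, so the cached boot time no longer loses sub-second precision twice. A simplified sketch of that conversion chain, collapsed into one helper rather than the uptimeMillis/timeSinceMillis pair used in the code:

package main

import (
	"fmt"
	"time"
)

// bootTime subtracts the millisecond uptime from "now in milliseconds" first,
// and converts to whole seconds only once, at the end.
func bootTime(uptimeMillis uint64) uint64 {
	bootMillis := uint64(time.Now().UnixMilli()) - uptimeMillis
	return uint64((time.Duration(bootMillis) * time.Millisecond).Seconds())
}

func main() {
	// A machine that booted roughly 90 minutes ago.
	fmt.Println("boot time (unix seconds):", bootTime(90*60*1000))
}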
-func MockEnv(key string, value string) func() { - original := os.Getenv(key) - os.Setenv(key, value) - return func() { - os.Setenv(key, original) - } +func HostRoot(combineWith ...string) string { + return GetEnv("HOST_ROOT", "/", combineWith...) } // getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go index da44c3f2..fa6373b5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go @@ -12,6 +12,7 @@ import ( "strconv" "strings" "sync" + "syscall" "time" ) @@ -68,6 +69,17 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { filename := HostProc(statFile) lines, err := ReadLines(filename) + if os.IsPermission(err) { + var info syscall.Sysinfo_t + err := syscall.Sysinfo(&info) + if err != nil { + return 0, err + } + + currentTime := time.Now().UnixNano() / int64(time.Second) + t := currentTime - int64(info.Uptime) + return uint64(t), nil + } if err != nil { return 0, err } @@ -149,6 +161,9 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { if StringsContains(contents, "kvm") { system = "kvm" role = "host" + } else if StringsContains(contents, "hv_util") { + system = "hyperv" + role = "guest" } else if StringsContains(contents, "vboxdrv") { system = "vbox" role = "host" @@ -244,6 +259,11 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { } } + if PathExists(HostRoot(".dockerenv")) { + system = "docker" + role = "guest" + } + // before returning for the first time, cache the system and role cachedVirtOnce.Do(func() { cachedVirtMutex.Lock() diff --git a/vendor/github.com/shirou/gopsutil/v3/host/types.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go similarity index 59% rename from vendor/github.com/shirou/gopsutil/v3/host/types.go rename to vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go index c2e7c0bd..a4aaadaf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/types.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go @@ -1,11 +1,10 @@ -package host +package common -import ( - "fmt" -) +import "fmt" type Warnings struct { - List []error + List []error + Verbose bool } func (w *Warnings) Add(err error) { @@ -20,5 +19,12 @@ func (w *Warnings) Reference() error { } func (w *Warnings) Error() string { + if w.Verbose { + str := "" + for i, e := range w.List { + str += fmt.Sprintf("\tError %d: %s\n", i, e.Error()) + } + return str + } return fmt.Sprintf("Number of warnings: %v", len(w.List)) } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go index 88f05f65..c911267e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/shirou/gopsutil/v3/internal/common" + "github.com/tklauser/go-sysconf" ) // VirtualMemory for Solaris is a minimal implementation which only returns @@ -34,6 +35,13 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return nil, err } result.Total = cap + freemem, err := globalZoneFreeMemory(ctx) + if err != nil { + return nil, err + } + result.Available = freemem + result.Free = freemem + result.Used = result.Total - result.Free } else { cap, err := 
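The common_linux change above falls back to sysinfo(2) when /proc/stat is unreadable (for example in a locked-down container), deriving boot time as now minus uptime. A Linux-only sketch of that fallback path:

//go:build linux

package main

import (
	"fmt"
	"syscall"
	"time"
)

// bootTimeFromSysinfo is the permission-denied fallback: sysinfo(2) needs no
// procfs access and reports uptime in whole seconds.
func bootTimeFromSysinfo() (uint64, error) {
	var info syscall.Sysinfo_t
	if err := syscall.Sysinfo(&info); err != nil {
		return 0, err
	}
	now := time.Now().Unix()
	return uint64(now - int64(info.Uptime)), nil
}

func main() {
	t, err := bootTimeFromSysinfo()
	if err != nil {
		panic(err)
	}
	fmt.Println("boot time:", time.Unix(int64(t), 0))
}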
nonGlobalZoneMemoryCapacity() if err != nil { @@ -85,6 +93,25 @@ func globalZoneMemoryCapacity() (uint64, error) { return totalMB * 1024 * 1024, nil } +func globalZoneFreeMemory(ctx context.Context) (uint64, error) { + output, err := invoke.CommandWithContext(ctx, "pagesize") + if err != nil { + return 0, err + } + + pagesize, err := strconv.ParseUint(strings.TrimSpace(string(output)), 10, 64) + if err != nil { + return 0, err + } + + free, err := sysconf.Sysconf(sysconf.SC_AVPHYS_PAGES) + if err != nil { + return 0, err + } + + return uint64(free) * pagesize, nil +} + var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) func nonGlobalZoneMemoryCapacity() (uint64, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go index 58325f65..e136be1b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go @@ -1,5 +1,5 @@ -//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows -// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows +//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris +// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris package net diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go index c0899719..c7cd0db1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go @@ -161,7 +161,7 @@ var netProtocols = []string{ // If protocols is empty then all protocols are returned, otherwise // just the protocols in the list are returned. // Available protocols: -// ip,icmp,icmpmsg,tcp,udp,udplite +// [ip,icmp,icmpmsg,tcp,udp,udplite] func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { return ProtoCountersWithContext(context.Background(), protocols) } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go new file mode 100644 index 00000000..7f1f5c86 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go @@ -0,0 +1,143 @@ +//go:build solaris +// +build solaris + +package net + +import ( + "context" + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" +) + +// NetIOCounters returnes network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. 
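The mem_solaris addition above computes free memory as available physical pages times the page size, with the page size taken from pagesize(1) and the page count from sysconf. A Solaris-only sketch of the same arithmetic; error handling is compressed and the function name is illustrative:

//go:build solaris

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"

	"github.com/tklauser/go-sysconf"
)

// freeMemoryBytes multiplies the page size reported by pagesize(1) by the
// number of available physical pages reported by sysconf.
func freeMemoryBytes() (uint64, error) {
	out, err := exec.Command("pagesize").Output()
	if err != nil {
		return 0, err
	}
	pagesize, err := strconv.ParseUint(strings.TrimSpace(string(out)), 10, 64)
	if err != nil {
		return 0, err
	}
	avail, err := sysconf.Sysconf(sysconf.SC_AVPHYS_PAGES)
	if err != nil {
		return 0, err
	}
	return uint64(avail) * pagesize, nil
}

func main() {
	free, err := freeMemoryBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println("free bytes:", free)
}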
+func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + // collect all the net class's links with below statistics + filterstr := "/^(?!vnic)/::phys:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + if runtime.GOOS == "illumos" { + filterstr = "/[^vnic]/::mac:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + } + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "net", "-p", filterstr) + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %w", err) + } + + lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("no interface found") + } + rbytes64arr := make(map[string]uint64) + ipackets64arr := make(map[string]uint64) + idrops64arr := make(map[string]uint64) + ierrorsarr := make(map[string]uint64) + obytes64arr := make(map[string]uint64) + opackets64arr := make(map[string]uint64) + odrops64arr := make(map[string]uint64) + oerrorsarr := make(map[string]uint64) + + re := regexp.MustCompile(`[:\s]+`) + for _, line := range lines { + fields := re.Split(line, -1) + interfaceName := fields[0] + instance := fields[1] + switch fields[3] { + case "rbytes64": + rbytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse rbytes64: %w", err) + } + case "ipackets64": + ipackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ipackets64: %w", err) + } + case "idrops64": + idrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse idrops64: %w", err) + } + case "ierrors": + ierrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ierrors: %w", err) + } + case "obytes64": + obytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse obytes64: %w", err) + } + case "opackets64": + opackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse opackets64: %w", err) + } + case "odrops64": + odrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse odrops64: %w", err) + } + case "oerrors": + oerrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse oerrors: %w", err) + } + } + } + ret := make([]IOCountersStat, 0) + for k := range rbytes64arr { + nic := IOCountersStat{ + Name: k, + BytesRecv: rbytes64arr[k], + PacketsRecv: ipackets64arr[k], + Errin: ierrorsarr[k], + Dropin: idrops64arr[k], + BytesSent: obytes64arr[k], + PacketsSent: opackets64arr[k], + Errout: oerrorsarr[k], + Dropout: odrops64arr[k], + } + ret = append(ret, nic) + } + + if !pernic { + return getIOCountersAll(ret) + } + + return ret, nil +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, 
common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return []FilterStat{}, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return []ProtoCountersStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go index 2fd2224f..cb846e28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go @@ -20,7 +20,7 @@ func Connections(kind string) ([]ConnectionStat, error) { } func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsPid(kind, 0) + return ConnectionsPidWithContext(ctx, kind, 0) } // Return a list of network connections opened returning at most `max` diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go index 2ac413f1..858f08e7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go @@ -175,6 +175,7 @@ func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { const tiSize = C.sizeof_struct_proc_taskinfo ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) if err != nil { @@ -187,6 +188,7 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { const tiSize = C.sizeof_struct_proc_taskinfo ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) if err != nil { @@ -204,6 +206,7 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { const tiSize = C.sizeof_struct_proc_taskinfo ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go index 18f4f945..14ed0309 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "os" + "path/filepath" "reflect" "strings" "syscall" @@ -319,18 +320,19 @@ func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { } func (p *Process) NameWithContext(ctx context.Context) (string, error) { - ppid, _, name, err := getFromSnapProcess(p.Pid) - if err != nil { - return "", fmt.Errorf("could not get Name: %s", err) + if p.Pid == 0 { + return "System Idle Process", nil + } + if p.Pid == 4 { + return 
"System", nil } - // if no errors and not cached already, cache ppid - p.parent = ppid - if 0 == p.getPpid() { - p.setPpid(ppid) + exe, err := p.ExeWithContext(ctx) + if err != nil { + return "", fmt.Errorf("could not get Name: %s", err) } - return name, nil + return filepath.Base(exe), nil } func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { @@ -408,7 +410,7 @@ func (p *Process) CwdWithContext(_ context.Context) (string, error) { } if userProcParams.CurrentDirectoryPathNameLength > 0 { cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CurrentDirectoryPathAddress), uint(userProcParams.CurrentDirectoryPathNameLength)) - if len(cwd) != int(userProcParams.CurrentDirectoryPathAddress) { + if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { return "", errors.New("cannot read current working directory") } @@ -987,15 +989,9 @@ func is32BitProcess(h windows.Handle) bool { var procIs32Bits bool switch processorArchitecture { - case PROCESSOR_ARCHITECTURE_INTEL: - fallthrough - case PROCESSOR_ARCHITECTURE_ARM: + case PROCESSOR_ARCHITECTURE_INTEL, PROCESSOR_ARCHITECTURE_ARM: procIs32Bits = true - case PROCESSOR_ARCHITECTURE_ARM64: - fallthrough - case PROCESSOR_ARCHITECTURE_IA64: - fallthrough - case PROCESSOR_ARCHITECTURE_AMD64: + case PROCESSOR_ARCHITECTURE_ARM64, PROCESSOR_ARCHITECTURE_IA64, PROCESSOR_ARCHITECTURE_AMD64: var wow64 uint ret, _, _ := common.ProcNtQueryInformationProcess.Call( diff --git a/vendor/github.com/shoenig/test/LICENSE b/vendor/github.com/shoenig/test/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/shoenig/test/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/shoenig/test/interfaces/interfaces.go b/vendor/github.com/shoenig/test/interfaces/interfaces.go new file mode 100644 index 00000000..9f4f26e8 --- /dev/null +++ b/vendor/github.com/shoenig/test/interfaces/interfaces.go @@ -0,0 +1,68 @@ +package interfaces + +import ( + "math" + + "github.com/shoenig/test/internal/constraints" +) + +// EqualFunc represents a type implementing the Equal method. +type EqualFunc[A any] interface { + Equal(A) bool +} + +// LessFunc represents any type implementing the Less method. +type LessFunc[A any] interface { + Less(A) bool +} + +// Map represents any map type where keys are comparable. +type Map[K comparable, V any] interface { + ~map[K]V +} + +// MapEqualFunc represents any map type where keys are comparable and values implement .Equal method. +type MapEqualFunc[K comparable, V EqualFunc[V]] interface { + ~map[K]V +} + +// Number is float, integer, or complex. +type Number interface { + constraints.Ordered + constraints.Float | constraints.Integer | constraints.Complex +} + +// Numeric returns false if n is Inf/NaN. +// +// Always returns true for integral values. +func Numeric[N Number](n N) bool { + check := func(f float64) bool { + if math.IsNaN(f) { + return false + } else if math.IsInf(f, 0) { + return false + } + return true + } + return check(float64(n)) +} + +// The LengthFunc interface is satisfied by a type that implements Len(). +type LengthFunc interface { + Len() int +} + +// The SizeFunc interface is satisfied by a type that implements Size(). +type SizeFunc interface { + Size() int +} + +// The EmptyFunc interface is satisfied by a type that implements Empty(). +type EmptyFunc interface { + Empty() bool +} + +// The ContainsFunc interface is satisfied by a type that implements Contains(T). +type ContainsFunc[T any] interface { + Contains(T) bool +} diff --git a/vendor/github.com/shoenig/test/internal/assertions/assertions.go b/vendor/github.com/shoenig/test/internal/assertions/assertions.go new file mode 100644 index 00000000..3ded105f --- /dev/null +++ b/vendor/github.com/shoenig/test/internal/assertions/assertions.go @@ -0,0 +1,1229 @@ +package assertions + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/shoenig/test/interfaces" + "github.com/shoenig/test/internal/constraints" + "github.com/shoenig/test/wait" +) + +const depth = 4 + +func Caller() string { + _, file, line, ok := runtime.Caller(depth) + if ok { + file = filepath.Base(file) + return fmt.Sprintf("%s:%d: ", file, line) + } + return "[???]" +} + +// diff creates a diff of a and b using cmp.Diff if possible, falling back to printing +// the Go string values of both types (e.g. contains unexported fields). +func diff[A, B any](a A, b B, opts cmp.Options) (s string) { + defer func() { + if r := recover(); r != nil { + s = fmt.Sprintf("↪ Assertion | comparison ↷\na: %#v\nb: %#v\n", a, b) + } + }() + s = "↪ Assertion | differential ↷\n" + cmp.Diff(a, b, opts) + return +} + +// equal compares a and b using cmp.Equal if possible, falling back to reflect.DeepEqual +// (e.g. contains unexported fields). 
+func equal[A, B any](a A, b B, opts cmp.Options) (result bool) { + defer func() { + if r := recover(); r != nil { + result = reflect.DeepEqual(a, b) + } + }() + result = cmp.Equal(a, b, opts) + return +} + +func contains[C comparable](slice []C, item C) bool { + found := false + for i := 0; i < len(slice); i++ { + if slice[i] == item { + found = true + break + } + } + return found +} + +func containsFunc[A, B any](slice []A, item B, eq func(a A, b B) bool) bool { + found := false + for i := 0; i < len(slice); i++ { + if eq(slice[i], item) { + found = true + break + } + } + return found +} + +func isNil(a any) bool { + // comparable check only works for simple types + if a == nil { + return true + } + + // check for non-nil nil types + value := reflect.ValueOf(a) + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return value.IsNil() + default: + return false + } +} + +func Nil(a any) (s string) { + if !isNil(a) { + s = "expected to be nil; is not nil\n" + } + return +} + +func NotNil(a any) (s string) { + if isNil(a) { + s = "expected to not be nil; is nil\n" + } + return +} + +func True(condition bool) (s string) { + if !condition { + s = "expected condition to be true; is false\n" + } + return +} + +func False(condition bool) (s string) { + if condition { + s = "expected condition to be false; is true\n" + } + return +} + +func Unreachable() (s string) { + s = "expected not to execute this code path\n" + return +} + +func Error(err error) (s string) { + if err == nil { + s = "expected non-nil error; is nil\n" + } + return +} + +func EqError(err error, msg string) (s string) { + if err == nil { + s = "expected error; got nil\n" + return + } + e := err.Error() + if e != msg { + s = "expected matching error strings\n" + s += bullet("msg: %q\n", msg) + s += bullet("err: %q\n", e) + } + return +} + +func ErrorIs(err error, target error) (s string) { + if err == nil { + s = "expected error; got nil\n" + return + } + if !errors.Is(err, target) { + s = "expected errors.Is match\n" + s += bullet("error: %v\n", err) + s += bullet("target: %v\n", target) + } + return +} + +func NoError(err error) (s string) { + if err != nil { + s = "expected nil error\n" + s += bullet("error: %v\n", err) + } + return +} + +func ErrorContains(err error, sub string) (s string) { + if err == nil { + s = "expected non-nil error\n" + return + } + actual := err.Error() + if !strings.Contains(actual, sub) { + s = "expected error to contain substring\n" + s += bullet("substring: %s\n", sub) + s += bullet(" err: %s\n", actual) + } + return +} + +func Eq[A any](exp, val A, opts ...cmp.Option) (s string) { + if !equal(exp, val, opts) { + s = "expected equality via cmp.Equal function\n" + s += diff(exp, val, opts) + } + return +} + +func NotEq[A any](exp, val A, opts ...cmp.Option) (s string) { + if equal(exp, val, opts) { + s = "expected inequality via cmp.Equal function\n" + } + return +} + +func EqOp[C comparable](exp, val C) (s string) { + if exp != val { + s = "expected equality via ==\n" + s += diff(exp, val, nil) + } + return +} + +func EqFunc[A any](exp, val A, eq func(a, b A) bool) (s string) { + if !eq(exp, val) { + s = "expected equality via 'eq' function\n" + s += diff(exp, val, nil) + } + return +} + +func NotEqOp[C comparable](exp, val C) (s string) { + if exp == val { + s = "expected inequality via !=\n" + } + return +} + +func NotEqFunc[A any](exp, val A, eq func(a, b A) bool) (s string) { + if eq(exp, val) { + s = "expected inequality via 
'eq' function\n" + } + return +} + +func EqJSON(exp, val string) (s string) { + var expA, expB any + + if err := json.Unmarshal([]byte(exp), &expA); err != nil { + s = fmt.Sprintf("failed to unmarshal first argument as json: %v\n", err) + return + } + + if err := json.Unmarshal([]byte(val), &expB); err != nil { + s = fmt.Sprintf("failed to unmarshal second argument as json: %v\n", err) + return + } + + if !reflect.DeepEqual(expA, expB) { + jsonA, _ := json.Marshal(expA) + jsonB, _ := json.Marshal(expB) + s = "expected equality via json marshalling\n" + s += diff(string(jsonA), string(jsonB), nil) + return + } + + return +} + +func ValidJSON(input string) (s string) { + return validJSON([]byte(input)) +} + +func ValidJSONBytes(input []byte) (s string) { + return validJSON(input) +} + +func validJSON(input []byte) (s string) { + if !json.Valid([]byte(input)) { + return "expected input to be valid json\n" + } + return +} + +func EqSliceFunc[A any](exp, val []A, eq func(a, b A) bool) (s string) { + lenA, lenB := len(exp), len(val) + + if lenA != lenB { + s = "expected slices of same length\n" + s += bullet("len(exp): %d\n", lenA) + s += bullet("len(val): %d\n", lenB) + s += diff(exp, val, nil) + return + } + + miss := false + for i := 0; i < lenA; i++ { + if !eq(exp[i], val[i]) { + miss = true + break + } + } + + if miss { + s = "expected slice equality via 'eq' function\n" + s += diff(exp, val, nil) + return + } + + return +} + +func Equal[E interfaces.EqualFunc[E]](exp, val E) (s string) { + if !val.Equal(exp) { + s = "expected equality via .Equal method\n" + s += diff(exp, val, nil) + } + return +} + +func NotEqual[E interfaces.EqualFunc[E]](exp, val E) (s string) { + if val.Equal(exp) { + s = "expected inequality via .Equal method\n" + s += diff(exp, val, nil) + } + return +} + +func SliceEqual[E interfaces.EqualFunc[E]](exp, val []E) (s string) { + lenA, lenB := len(exp), len(val) + + if lenA != lenB { + s = "expected slices of same length\n" + s += bullet("len(exp): %d\n", lenA) + s += bullet("len(val): %d\n", lenB) + s += diff(exp, val, nil) + return + } + + for i := 0; i < lenA; i++ { + if !exp[i].Equal(val[i]) { + s += "expected slice equality via .Equal method\n" + s += diff(exp[i], val[i], nil) + return + } + } + return +} + +func Lesser[L interfaces.LessFunc[L]](exp, val L) (s string) { + if !val.Less(exp) { + s = "expected val to be less via .Less method\n" + s += diff(exp, val, nil) + } + return +} + +func SliceEmpty[A any](slice []A) (s string) { + if len(slice) != 0 { + s = "expected slice to be empty\n" + s += bullet("len(slice): %d\n", len(slice)) + } + return +} + +func SliceNotEmpty[A any](slice []A) (s string) { + if len(slice) == 0 { + s = "expected slice to not be empty\n" + s += bullet("len(slice): %d\n", len(slice)) + } + return +} + +func SliceLen[A any](n int, slice []A) (s string) { + if l := len(slice); l != n { + s = "expected slice to be different length\n" + s += bullet("len(slice): %d, expected: %d\n", l, n) + } + return +} + +func SliceContainsOp[C comparable](slice []C, item C) (s string) { + if !contains(slice, item) { + s = "expected slice to contain missing item via == operator\n" + s += bullet("slice is missing %#v\n", item) + } + return +} + +func SliceContainsFunc[A, B any](slice []A, item B, eq func(a A, b B) bool) (s string) { + if !containsFunc(slice, item, eq) { + s = "expected slice to contain missing item via 'eq' function\n" + s += bullet("slice is missing %#v\n", item) + } + return +} + +func SliceContainsEqual[E interfaces.EqualFunc[E]](slice 
[]E, item E) (s string) { + if !containsFunc(slice, item, E.Equal) { + s = "expected slice to contain missing item via .Equal method\n" + s += bullet("slice is missing %#v\n", item) + } + return +} + +func SliceContains[A any](slice []A, item A, opts ...cmp.Option) (s string) { + for _, i := range slice { + if cmp.Equal(i, item, opts...) { + return + } + } + s = "expected slice to contain missing item via cmp.Equal method\n" + s += bullet("slice is missing %#v\n", item) + return +} + +func SliceNotContains[A any](slice []A, item A, opts ...cmp.Option) (s string) { + for _, i := range slice { + if cmp.Equal(i, item, opts...) { + s = "expected slice to not contain item but it does\n" + s += bullet("unwanted item %#v\n", item) + return + } + } + return +} + +func SliceContainsAll[A any](slice, items []A) (s string) { + if len(slice) != len(items) { + s = "expected slice and items to contain same number of elements\n" + s += bullet("len(slice): %d\n", len(slice)) + s += bullet("len(items): %d\n", len(items)) + return s + } + return SliceContainsSubset(slice, items) +} + +func SliceContainsSubset[A any](slice, items []A) (s string) { +OUTER: + for _, target := range items { + var item A + for _, item = range slice { + if cmp.Equal(target, item) { + continue OUTER + } + } + s = "expected slice to contain missing item\n" + s += bullet("slice is missing %#v\n", item) + return + } + return +} + +func Positive[N interfaces.Number](value N) (s string) { + if !(value > 0) { + s = "expected positive value\n" + s += bullet("value: %v\n", value) + } + return +} + +func NonPositive[N interfaces.Number](value N) (s string) { + if !(value <= 0) { + s = "expected non-positive value\n" + s += bullet("value: %v\n", value) + } + return +} + +func Negative[N interfaces.Number](value N) (s string) { + if value > 0 { + s = "expected negative value\n" + s += bullet("value: %v\n", value) + } + return +} + +func NonNegative[N interfaces.Number](value N) (s string) { + if !(value >= 0) { + s = "expected non-negative value\n" + s += bullet("value: %v\n", value) + } + return +} + +func Zero[N interfaces.Number](value N) (s string) { + if value != 0 { + s = "expected value of 0\n" + s += bullet("value: %v\n", value) + } + return +} + +func NonZero[N interfaces.Number](value N) (s string) { + if value == 0 { + s = "expected non-zero value\n" + s += bullet("value: %v\n", value) + } + return +} + +func One[N interfaces.Number](value N) (s string) { + if value != 1 { + s = "expected value of 1\n" + s += bullet("value: %v\n", value) + } + return +} + +func Less[O constraints.Ordered](exp, val O) (s string) { + if !(val < exp) { + s = fmt.Sprintf("expected %v < %v\n", val, exp) + } + return +} + +func LessEq[O constraints.Ordered](exp, val O) (s string) { + if !(val <= exp) { + s = fmt.Sprintf("expected %v ≤ %v\n", val, exp) + } + return +} + +func Greater[O constraints.Ordered](exp, val O) (s string) { + if !(val > exp) { + s = fmt.Sprintf("expected %v > %v\n", val, exp) + } + return +} + +func GreaterEq[O constraints.Ordered](exp, val O) (s string) { + if !(val >= exp) { + s = fmt.Sprintf("expected %v ≥ %v\n", val, exp) + } + return +} + +func Between[O constraints.Ordered](lower, val, upper O) (s string) { + if val < lower || val > upper { + s = fmt.Sprintf("expected val in range (%v ≤ val ≤ %v)\n", lower, upper) + s += bullet("val: %v\n", val) + return + } + return +} + +func BetweenExclusive[O constraints.Ordered](lower, val, upper O) (s string) { + if val <= lower || val >= upper { + s = fmt.Sprintf("expected val in 
range (%v < val < %v)\n", lower, upper) + s += bullet("val: %v\n", val) + return + } + return +} + +func Ascending[O constraints.Ordered](slice []O) (s string) { + for i := 0; i < len(slice)-1; i++ { + if slice[i] > slice[i+1] { + s = fmt.Sprintf("expected slice[%d] <= slice[%d]\n", i, i+1) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + +func AscendingFunc[A any](slice []A, less func(a, b A) bool) (s string) { + for i := 0; i < len(slice)-1; i++ { + if !less(slice[i], slice[i+1]) { + s = fmt.Sprintf("expected less(slice[%d], slice[%d])\n", i, i+1) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + +func AscendingLess[L interfaces.LessFunc[L]](slice []L) (s string) { + for i := 0; i < len(slice)-1; i++ { + if !slice[i].Less(slice[i+1]) { + s = fmt.Sprintf("expected slice[%d].Less(slice[%d])\n", i, i+1) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + +func Descending[O constraints.Ordered](slice []O) (s string) { + for i := 0; i < len(slice)-1; i++ { + if slice[i] < slice[i+1] { + s = fmt.Sprintf("expected slice[%d] >= slice[%d]\n", i, i+1) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + +func DescendingFunc[A any](slice []A, less func(a, b A) bool) (s string) { + for i := 0; i < len(slice)-1; i++ { + if !less(slice[i+1], slice[i]) { + s = fmt.Sprintf("expected less(slice[%d], slice[%d])\n", i+1, i) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + +func DescendingLess[L interfaces.LessFunc[L]](slice []L) (s string) { + for i := 0; i < len(slice)-1; i++ { + if !(slice[i+1].Less(slice[i])) { + s = fmt.Sprintf("expected slice[%d].Less(slice[%d])\n", i+1, i) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + +func InDelta[N interfaces.Number](a, b, delta N) (s string) { + var zero N + + if !interfaces.Numeric(delta) { + s = fmt.Sprintf("delta must be numeric; got %v\n", delta) + return + } + + if delta <= zero { + s = fmt.Sprintf("delta must be positive; got %v\n", delta) + return + } + + if !interfaces.Numeric(a) { + s = fmt.Sprintf("first argument must be numeric; got %v\n", a) + return + } + + if !interfaces.Numeric(b) { + s = fmt.Sprintf("second argument must be numeric; got %v\n", b) + return + } + + difference := a - b + if difference < -delta || difference > delta { + s = fmt.Sprintf("%v and %v not within %v\n", a, b, delta) + return + } + + return +} + +func InDeltaSlice[N interfaces.Number](a, b []N, delta N) (s string) { + if len(a) != len(b) { + s = "expected slices of same length\n" + s += bullet("len(slice a): %d\n", len(a)) + s += bullet("len(slice b): %d\n", len(b)) + return + } + + for i := 0; i < len(a); i++ { + if s = InDelta(a[i], b[i], delta); s != "" { + return + } + } + return +} + +func MapEq[M1, M2 interfaces.Map[K, V], K comparable, V any](exp M1, val M2, opts cmp.Options) (s string) { + lenA, lenB := len(exp), len(val) + + if lenA != lenB { + s = "expected maps of same length\n" + s += bullet("len(exp): %d\n", lenA) + s += bullet("len(val): %d\n", lenB) + return + } + + for key, valA := range exp { + valB, exists := val[key] + if !exists { + s = "expected maps of same keys\n" + s += diff(exp, val, opts) + 
return + } + + if !cmp.Equal(valA, valB, opts) { + s = "expected maps of same values via cmp.Equal function\n" + s += diff(exp, val, opts) + return + } + } + return +} + +func MapEqFunc[M1, M2 interfaces.Map[K, V], K comparable, V any](exp M1, val M2, eq func(V, V) bool) (s string) { + lenA, lenB := len(exp), len(val) + + if lenA != lenB { + s = "expected maps of same length\n" + s += bullet("len(exp): %d\n", lenA) + s += bullet("len(val): %d\n", lenB) + return + } + + for key, valA := range exp { + valB, exists := val[key] + if !exists { + s = "expected maps of same keys\n" + s += diff(exp, val, nil) + return + } + + if !eq(valA, valB) { + s = "expected maps of same values via 'eq' function\n" + s += diff(exp, val, nil) + return + } + } + return +} + +func MapEqual[M interfaces.MapEqualFunc[K, V], K comparable, V interfaces.EqualFunc[V]](exp, val M) (s string) { + lenA, lenB := len(exp), len(val) + + if lenA != lenB { + s = "expected maps of same length\n" + s += bullet("len(exp): %d\n", lenA) + s += bullet("len(val): %d\n", lenB) + return + } + + for key, valA := range exp { + valB, exists := val[key] + if !exists { + s = "expected maps of same keys\n" + s += diff(exp, val, nil) + return + } + + if !(valB).Equal(valA) { + s = "expected maps of same values via .Equal method\n" + s += diff(exp, val, nil) + return + } + } + + return +} + +func MapLen[M ~map[K]V, K comparable, V any](n int, m M) (s string) { + if l := len(m); l != n { + s = "expected map to be different length\n" + s += bullet("len(map): %d, expected: %d\n", l, n) + } + return +} + +func MapEmpty[M ~map[K]V, K comparable, V any](m M) (s string) { + if l := len(m); l > 0 { + s = "expected map to be empty\n" + s += bullet("len(map): %d\n", l) + } + return +} + +func MapNotEmpty[M ~map[K]V, K comparable, V any](m M) (s string) { + if l := len(m); l == 0 { + s = "expected map to not be empty\n" + s += bullet("len(map): %d\n", l) + } + return +} + +func MapContainsKey[M ~map[K]V, K comparable, V any](m M, key K) (s string) { + if _, exists := m[key]; !exists { + s = "expected map to contain key\n" + s += bullet("key: %v\n", key) + } + return +} + +func MapNotContainsKey[M ~map[K]V, K comparable, V any](m M, key K) (s string) { + if _, exists := m[key]; exists { + s = "expected map to not contain key\n" + s += bullet("key: %v\n", key) + } + return +} + +func MapContainsKeys[M ~map[K]V, K comparable, V any](m M, keys []K) (s string) { + var missing []K + for _, key := range keys { + if _, exists := m[key]; !exists { + missing = append(missing, key) + } + } + if len(missing) > 0 { + s = "expected map to contain keys\n" + for _, key := range missing { + s += bullet("key: %v\n", key) + } + } + return +} + +func MapNotContainsKeys[M ~map[K]V, K comparable, V any](m M, keys []K) (s string) { + var unwanted []K + for _, key := range keys { + if _, exists := m[key]; exists { + unwanted = append(unwanted, key) + } + } + if len(unwanted) > 0 { + s = "expected map to not contain keys\n" + for _, key := range unwanted { + s += bullet("key: %v\n", key) + } + } + return +} + +func mapContains[M ~map[K]V, K comparable, V any](m M, values []V, eq func(V, V) bool) (s string) { + var missing []V + for _, wanted := range values { + found := false + for _, v := range m { + if eq(wanted, v) { + found = true + break + } + } + if !found { + missing = append(missing, wanted) + } + } + + if len(missing) > 0 { + s = "expected map to contain values\n" + for _, val := range missing { + s += bullet("val: %v\n", val) + } + } + return +} + +func 
mapNotContains[M ~map[K]V, K comparable, V any](m M, values []V, eq func(V, V) bool) (s string) { + var unexpected []V + for _, target := range values { + found := false + for _, v := range m { + if eq(target, v) { + found = true + break + } + } + if found { + unexpected = append(unexpected, target) + } + } + if len(unexpected) > 0 { + s = "expected map to not contain values\n" + for _, val := range unexpected { + s += bullet("val: %v\n", val) + } + } + return +} + +func MapContainsValues[M ~map[K]V, K comparable, V any](m M, vals []V, opts cmp.Options) (s string) { + return mapContains(m, vals, func(a, b V) bool { + return equal(a, b, opts) + }) +} + +func MapNotContainsValues[M ~map[K]V, K comparable, V any](m M, vals []V, opts cmp.Options) (s string) { + return mapNotContains(m, vals, func(a, b V) bool { + return equal(a, b, opts) + }) +} + +func MapContainsValuesFunc[M ~map[K]V, K comparable, V any](m M, vals []V, eq func(V, V) bool) (s string) { + return mapContains(m, vals, eq) +} + +func MapNotContainsValuesFunc[M ~map[K]V, K comparable, V any](m M, vals []V, eq func(V, V) bool) (s string) { + return mapNotContains(m, vals, eq) +} + +func MapContainsValuesEqual[M ~map[K]V, K comparable, V interfaces.EqualFunc[V]](m M, vals []V) (s string) { + return mapContains(m, vals, func(a, b V) bool { + return a.Equal(b) + }) +} + +func MapNotContainsValuesEqual[M ~map[K]V, K comparable, V interfaces.EqualFunc[V]](m M, vals []V) (s string) { + return mapNotContains(m, vals, func(a, b V) bool { + return a.Equal(b) + }) +} + +func FileExistsFS(system fs.FS, file string) (s string) { + info, err := fs.Stat(system, file) + if errors.Is(err, fs.ErrNotExist) { + s = "expected file to exist\n" + s += bullet(" name: %s\n", file) + s += bullet("error: %s\n", err) + return + } + + // other errors - file probably exists but cannot be read + if info.IsDir() { + s = "expected file but is a directory\n" + s += bullet("name: %s\n", file) + return + } + return +} + +func FileNotExistsFS(system fs.FS, file string) (s string) { + _, err := fs.Stat(system, file) + if !errors.Is(err, fs.ErrNotExist) { + s = "expected file to not exist\n" + s += bullet("name: %s\n", file) + return + } + return +} + +func DirExistsFS(system fs.FS, directory string) (s string) { + info, err := fs.Stat(system, directory) + if os.IsNotExist(err) { + s = "expected directory to exist\n" + s += bullet(" name: %s\n", directory) + s += bullet("error: %s\n", err) + return + } + // other errors - directory probably exists but cannot be read + if !info.IsDir() { + s = "expected directory but is a file\n" + s += bullet("name: %s\n", directory) + return + } + return +} + +func DirNotExistsFS(system fs.FS, directory string) (s string) { + _, err := fs.Stat(system, directory) + if !errors.Is(err, fs.ErrNotExist) { + s = "expected directory to not exist\n" + s += bullet("name: %s\n", directory) + return + } + return +} + +func FileModeFS(system fs.FS, path string, permissions fs.FileMode) (s string) { + info, err := fs.Stat(system, path) + if err != nil { + s = "expected to stat path\n" + s += bullet(" name: %s\n", path) + s += bullet("error: %s\n", err) + return + } + + mode := info.Mode() + if permissions != mode { + s = "expected different file permissions\n" + s += bullet("name: %s\n", path) + s += bullet(" exp: %s\n", permissions) + s += bullet(" got: %s\n", mode) + } + return +} + +func FileContainsFS(system fs.FS, file, content string) (s string) { + b, err := fs.ReadFile(system, file) + if err != nil { + s = "expected to read file\n" + s += 
bullet(" name: %s\n", file) + s += bullet("error: %s\n", err) + return + } + actual := string(b) + if !strings.Contains(string(b), content) { + s = "expected file contents\n" + s += bullet(" name: %s\n", file) + s += bullet("wanted: %s\n", content) + s += bullet("actual: %s\n", actual) + return + } + return +} + +func FilePathValid(path string) (s string) { + if !fs.ValidPath(path) { + s = "expected valid file path\n" + } + return +} + +func StrEqFold(exp, val string) (s string) { + if !strings.EqualFold(exp, val) { + s = "expected strings to be equal ignoring case\n" + s += bullet("exp: %s\n", exp) + s += bullet("val: %s\n", val) + } + return +} + +func StrNotEqFold(exp, val string) (s string) { + if strings.EqualFold(exp, val) { + s = "expected strings to not be equal ignoring case; but they are\n" + s += bullet("exp: %s\n", exp) + s += bullet("val: %s\n", val) + } + return +} + +func StrContains(str, sub string) (s string) { + if !strings.Contains(str, sub) { + s = "expected string to contain substring; it does not\n" + s += bullet("substring: %s\n", sub) + s += bullet(" string: %s\n", str) + } + return +} + +func StrContainsFold(str, sub string) (s string) { + upperS := strings.ToUpper(str) + upperSub := strings.ToUpper(sub) + return StrContains(upperS, upperSub) +} + +func StrNotContains(str, sub string) (s string) { + if strings.Contains(str, sub) { + s = "expected string to not contain substring; but it does\n" + s += bullet("substring: %s\n", sub) + s += bullet(" string: %s\n", str) + } + return +} + +func StrNotContainsFold(str, sub string) (s string) { + upperS := strings.ToUpper(str) + upperSub := strings.ToUpper(sub) + return StrNotContains(upperS, upperSub) +} + +func StrContainsAny(str, chars string) (s string) { + if !strings.ContainsAny(str, chars) { + s = "expected string to contain one or more code points\n" + s += bullet("code-points: %s\n", chars) + s += bullet(" string: %s\n", str) + } + return +} + +func StrNotContainsAny(str, chars string) (s string) { + if strings.ContainsAny(str, chars) { + s = "expected string to not contain code points; but it does\n" + s += bullet("code-points: %s\n", chars) + s += bullet(" string: %s\n", str) + } + return +} + +func StrCount(str, sub string, exp int) (s string) { + count := strings.Count(str, sub) + if count != exp { + s = fmt.Sprintf("expected string to contain %d non-overlapping cases of substring\n", exp) + s += bullet("count: %d\n", count) + } + return +} + +func StrContainsFields(str string, fields []string) (s string) { + set := make(map[string]struct{}, len(fields)) + for _, field := range strings.Fields(str) { + set[field] = struct{}{} + } + var missing []string + for _, field := range fields { + if _, exists := set[field]; !exists { + missing = append(missing, field) + } + } + if len(missing) > 0 { + s = "expected fields of string to contain subset of values\n" + s += bullet("missing: %s\n", strings.Join(missing, ", ")) + } + return +} + +func StrHasPrefix(prefix, str string) (s string) { + if !strings.HasPrefix(str, prefix) { + s = "expected string to have prefix\n" + s += bullet("prefix: %s\n", prefix) + s += bullet("string: %s\n", str) + } + return +} + +func StrNotHasPrefix(prefix, str string) (s string) { + if strings.HasPrefix(str, prefix) { + s = "expected string to not have prefix; but it does\n" + s += bullet("prefix: %s\n", prefix) + s += bullet("string: %s\n", str) + } + return +} + +func StrHasSuffix(suffix, str string) (s string) { + if !strings.HasSuffix(str, suffix) { + s = "expected string to have 
suffix\n" + s += bullet("suffix: %s\n", suffix) + s += bullet("string: %s\n", str) + } + return +} + +func StrNotHasSuffix(suffix, str string) (s string) { + if strings.HasSuffix(str, suffix) { + s = "expected string to not have suffix; but it does\n" + s += bullet("suffix: %s\n", suffix) + s += bullet("string: %s\n", str) + } + return +} + +func RegexMatch(re *regexp.Regexp, target string) (s string) { + if !re.MatchString(target) { + s = "expected regexp match\n" + s += bullet("regex: %s\n", re) + s += bullet("string: %s\n", target) + } + return +} + +func RegexpCompiles(expr string) (s string) { + if _, err := regexp.Compile(expr); err != nil { + s = "expected regular expression to compile\n" + s += bullet("regex: %s\n", expr) + s += bullet("error: %v\n", err) + } + return +} + +func RegexpCompilesPOSIX(expr string) (s string) { + if _, err := regexp.CompilePOSIX(expr); err != nil { + s = "expected regular expression to compile (posix)\n" + s += bullet("regex: %s\n", expr) + s += bullet("error: %v\n", err) + } + return +} + +// a10b173d-1427-432d-8a27-b12eada42feb +var uuid4Re = regexp.MustCompile(`^[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}$`) + +func UUIDv4(id string) (s string) { + if !uuid4Re.MatchString(id) { + s = "expected well-formed v4 UUID\n" + s += bullet("format: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n") + s += bullet("actual: " + id + "\n") + } + return +} + +func Length(n int, length interfaces.LengthFunc) (s string) { + if l := length.Len(); l != n { + s = "expected different length\n" + s += bullet("length: %d\n") + s += bullet("expected: %d\n", l, n) + } + return +} + +func Size(n int, size interfaces.SizeFunc) (s string) { + if l := size.Size(); l != n { + s = "expected different size\n" + s += bullet("size: %d\n", l) + s += bullet("expected: %d\n", n) + } + return +} + +func Empty(e interfaces.EmptyFunc) (s string) { + if !e.Empty() { + s = "expected to be empty, but was not\n" + } + return +} + +func NotEmpty(e interfaces.EmptyFunc) (s string) { + if e.Empty() { + s = "expected to not be empty, but is\n" + } + return +} + +func Contains[C any](i C, c interfaces.ContainsFunc[C]) (s string) { + if !c.Contains(i) { + s = "expected to contain element, but does not\n" + } + return +} + +func ContainsSubset[C any](elements []C, container interfaces.ContainsFunc[C]) (s string) { + for i := 0; i < len(elements); i++ { + element := elements[i] + if !container.Contains(element) { + s = "expected to contain element, but does not\n" + s += bullet("element: %v\n", element) + return + } + } + return +} + +func NotContains[C any](i C, c interfaces.ContainsFunc[C]) (s string) { + if c.Contains(i) { + s = "expected not to contain element, but it does\n" + } + return +} + +func Wait(wc *wait.Constraint) (s string) { + err := wc.Run() + if err != nil { + s = "expected condition to pass within wait context\n" + s += bullet("error: %v\n", err) + // context info? + } + return +} + +func bullet(msg string, args ...any) string { + return fmt.Sprintf("↪ "+msg, args...) 
+} diff --git a/vendor/github.com/shoenig/test/internal/brokenfs/fs_default.go b/vendor/github.com/shoenig/test/internal/brokenfs/fs_default.go new file mode 100644 index 00000000..841c4161 --- /dev/null +++ b/vendor/github.com/shoenig/test/internal/brokenfs/fs_default.go @@ -0,0 +1,7 @@ +//go:build !windows + +package brokenfs + +const ( + Root = "/" +) diff --git a/vendor/github.com/shoenig/test/internal/brokenfs/fs_windows.go b/vendor/github.com/shoenig/test/internal/brokenfs/fs_windows.go new file mode 100644 index 00000000..569d7ccc --- /dev/null +++ b/vendor/github.com/shoenig/test/internal/brokenfs/fs_windows.go @@ -0,0 +1,17 @@ +//go:build windows + +package brokenfs + +import ( + "os" +) + +var ( + Root = os.Getenv("HOMEDRIVE") +) + +func init() { + if Root == "" { + Root = "C:" + } +} diff --git a/vendor/github.com/shoenig/test/internal/constraints/constraints.go b/vendor/github.com/shoenig/test/internal/constraints/constraints.go new file mode 100644 index 00000000..2c033dff --- /dev/null +++ b/vendor/github.com/shoenig/test/internal/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/github.com/shoenig/test/must/assert.go b/vendor/github.com/shoenig/test/must/assert.go new file mode 100644 index 00000000..37b76d52 --- /dev/null +++ b/vendor/github.com/shoenig/test/must/assert.go @@ -0,0 +1,12 @@ +package must + +// T is the minimal set of functions to be implemented by any testing framework +// compatible with the must package. +type T interface { + Helper() + Fatalf(string, ...any) +} + +func errorf(t T, msg string, args ...any) { + t.Fatalf(msg, args...) 
+} diff --git a/vendor/github.com/shoenig/test/must/fs_default.go b/vendor/github.com/shoenig/test/must/fs_default.go new file mode 100644 index 00000000..38a13b5f --- /dev/null +++ b/vendor/github.com/shoenig/test/must/fs_default.go @@ -0,0 +1,9 @@ +// Code generated via scripts/generate.sh. DO NOT EDIT. + +//go:build !windows + +package must + +var ( + fsRoot = "/" +) diff --git a/vendor/github.com/shoenig/test/must/fs_windows.go b/vendor/github.com/shoenig/test/must/fs_windows.go new file mode 100644 index 00000000..ae1f96c1 --- /dev/null +++ b/vendor/github.com/shoenig/test/must/fs_windows.go @@ -0,0 +1,19 @@ +// Code generated via scripts/generate.sh. DO NOT EDIT. + +//go:build windows + +package must + +import ( + "os" +) + +var ( + fsRoot = os.Getenv("HOMEDRIVE") +) + +func init() { + if fsRoot == "" { + fsRoot = "C:" + } +} diff --git a/vendor/github.com/shoenig/test/must/invocations.go b/vendor/github.com/shoenig/test/must/invocations.go new file mode 100644 index 00000000..44aeddfb --- /dev/null +++ b/vendor/github.com/shoenig/test/must/invocations.go @@ -0,0 +1,27 @@ +// Code generated via scripts/generate.sh. DO NOT EDIT. + +// Package test provides a modern generic testing assertions library. +package must + +import ( + "strings" + + "github.com/shoenig/test/internal/assertions" +) + +func passing(result string) bool { + return result == "" +} + +func fail(t T, msg string, scripts ...PostScript) { + c := assertions.Caller() + s := c + msg + "\n" + run(scripts...) + errorf(t, "\n"+strings.TrimSpace(s)+"\n") +} + +func invoke(t T, result string, settings ...Setting) { + result = strings.TrimSpace(result) + if !passing(result) { + fail(t, result, scripts(settings...)...) + } +} diff --git a/vendor/github.com/shoenig/test/must/must.go b/vendor/github.com/shoenig/test/must/must.go new file mode 100644 index 00000000..bcf3f906 --- /dev/null +++ b/vendor/github.com/shoenig/test/must/must.go @@ -0,0 +1,717 @@ +// Code generated via scripts/generate.sh. DO NOT EDIT. + +package must + +import ( + "io/fs" + "os" + "regexp" + "strings" + + "github.com/shoenig/test/interfaces" + "github.com/shoenig/test/internal/assertions" + "github.com/shoenig/test/internal/brokenfs" + "github.com/shoenig/test/internal/constraints" + "github.com/shoenig/test/wait" +) + +// Nil asserts a is nil. +func Nil(t T, a any, settings ...Setting) { + t.Helper() + invoke(t, assertions.Nil(a), settings...) +} + +// NotNil asserts a is not nil. +func NotNil(t T, a any, settings ...Setting) { + t.Helper() + invoke(t, assertions.NotNil(a), settings...) +} + +// True asserts that condition is true. +func True(t T, condition bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.True(condition), settings...) +} + +// False asserts condition is false. +func False(t T, condition bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.False(condition), settings...) +} + +// Unreachable asserts a code path is not executed. +func Unreachable(t T, settings ...Setting) { + t.Helper() + invoke(t, assertions.Unreachable(), settings...) +} + +// Error asserts err is a non-nil error. +func Error(t T, err error, settings ...Setting) { + t.Helper() + invoke(t, assertions.Error(err), settings...) +} + +// EqError asserts err contains message msg. +func EqError(t T, err error, msg string, settings ...Setting) { + t.Helper() + invoke(t, assertions.EqError(err, msg), settings...) 
+} + +// ErrorIs asserts err +func ErrorIs(t T, err error, target error, settings ...Setting) { + t.Helper() + invoke(t, assertions.ErrorIs(err, target), settings...) +} + +// NoError asserts err is a nil error. +func NoError(t T, err error, settings ...Setting) { + t.Helper() + invoke(t, assertions.NoError(err), settings...) +} + +// ErrorContains asserts err contains sub. +func ErrorContains(t T, err error, sub string, settings ...Setting) { + t.Helper() + invoke(t, assertions.ErrorContains(err, sub), settings...) +} + +// Eq asserts exp and val are equal using cmp.Equal. +func Eq[A any](t T, exp, val A, settings ...Setting) { + t.Helper() + invoke(t, assertions.Eq(exp, val, options(settings...)...), settings...) +} + +// EqOp asserts exp == val. +func EqOp[C comparable](t T, exp, val C, settings ...Setting) { + t.Helper() + invoke(t, assertions.EqOp(exp, val), settings...) +} + +// EqFunc asserts exp and val are equal using eq. +func EqFunc[A any](t T, exp, val A, eq func(a, b A) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.EqFunc(exp, val, eq), settings...) +} + +// NotEq asserts exp and val are not equal using cmp.Equal. +func NotEq[A any](t T, exp, val A, settings ...Setting) { + t.Helper() + invoke(t, assertions.NotEq(exp, val, options(settings...)...), settings...) +} + +// NotEqOp asserts exp != val. +func NotEqOp[C comparable](t T, exp, val C, settings ...Setting) { + t.Helper() + invoke(t, assertions.NotEqOp(exp, val), settings...) +} + +// NotEqFunc asserts exp and val are not equal using eq. +func NotEqFunc[A any](t T, exp, val A, eq func(a, b A) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.NotEqFunc(exp, val, eq), settings...) +} + +// EqJSON asserts exp and val are equivalent JSON. +func EqJSON(t T, exp, val string, settings ...Setting) { + t.Helper() + invoke(t, assertions.EqJSON(exp, val), settings...) +} + +// ValidJSON asserts js is valid JSON. +func ValidJSON(t T, js string, settings ...Setting) { + t.Helper() + invoke(t, assertions.ValidJSON(js), settings...) +} + +// ValidJSONBytes asserts js is valid JSON. +func ValidJSONBytes(t T, js []byte, settings ...Setting) { + t.Helper() + invoke(t, assertions.ValidJSONBytes(js)) +} + +// Equal asserts val.Equal(exp). +func Equal[E interfaces.EqualFunc[E]](t T, exp, val E, settings ...Setting) { + t.Helper() + invoke(t, assertions.Equal(exp, val), settings...) +} + +// NotEqual asserts !val.Equal(exp). +func NotEqual[E interfaces.EqualFunc[E]](t T, exp, val E, settings ...Setting) { + t.Helper() + invoke(t, assertions.NotEqual(exp, val), settings...) +} + +// Lesser asserts val.Less(exp). +func Lesser[L interfaces.LessFunc[L]](t T, exp, val L, settings ...Setting) { + t.Helper() + invoke(t, assertions.Lesser(exp, val), settings...) +} + +// SliceEqFunc asserts elements of exp and val are the same using eq. +func SliceEqFunc[A any](t T, exp, val []A, eq func(a, b A) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.EqSliceFunc(exp, val, eq), settings...) +} + +// SliceEqual asserts val[n].Equal(exp[n]) for each element n. +func SliceEqual[E interfaces.EqualFunc[E]](t T, exp, val []E, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceEqual(exp, val), settings...) +} + +// SliceEmpty asserts slice is empty. +func SliceEmpty[A any](t T, slice []A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceEmpty(slice), settings...) +} + +// SliceNotEmpty asserts slice is not empty. 
+func SliceNotEmpty[A any](t T, slice []A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceNotEmpty(slice), settings...) +} + +// SliceLen asserts slice is of length n. +func SliceLen[A any](t T, n int, slice []A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceLen(n, slice), settings...) +} + +// Len asserts slice is of length n. +// +// Shorthand function for SliceLen. For checking Len() of a struct, +// use the Length() assertion. +func Len[A any](t T, n int, slice []A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceLen(n, slice), settings...) +} + +// SliceContainsOp asserts item exists in slice using == operator. +func SliceContainsOp[C comparable](t T, slice []C, item C, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceContainsOp(slice, item), settings...) +} + +// SliceContainsFunc asserts item exists in slice, using eq to compare elements. +func SliceContainsFunc[A, B any](t T, slice []A, item B, eq func(a A, b B) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceContainsFunc(slice, item, eq), settings...) +} + +// SliceContainsEqual asserts item exists in slice, using Equal to compare elements. +func SliceContainsEqual[E interfaces.EqualFunc[E]](t T, slice []E, item E, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceContainsEqual(slice, item), settings...) +} + +// SliceContains asserts item exists in slice, using cmp.Equal to compare elements. +func SliceContains[A any](t T, slice []A, item A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceContains(slice, item, options(settings...)...), settings...) +} + +// SliceNotContains asserts item does not exist in slice, using cmp.Equal to +// compare elements. +func SliceNotContains[A any](t T, slice []A, item A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceNotContains(slice, item), settings...) +} + +// SliceContainsAll asserts slice and items contain the same elements, but in +// no particular order. The number of elements in slice and items must be the +// same. +func SliceContainsAll[A any](t T, slice, items []A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceContainsAll(slice, items), settings...) +} + +// SliceContainsSubset asserts slice contains each item in items, in no particular +// order. There could be additional elements in slice not in items. +func SliceContainsSubset[A any](t T, slice, items []A, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceContainsSubset(slice, items), settings...) +} + +// Positive asserts n > 0. +func Positive[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.Positive(n), settings...) +} + +// NonPositive asserts n ≤ 0. +func NonPositive[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.NonPositive(n), settings...) +} + +// Negative asserts n < 0. +func Negative[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.Negative(n), settings...) +} + +// NonNegative asserts n >= 0. +func NonNegative[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.NonNegative(n), settings...) +} + +// Zero asserts n == 0. +func Zero[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.Zero(n), settings...) +} + +// NonZero asserts n != 0. +func NonZero[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.NonZero(n), settings...) 
+} + +// One asserts n == 1. +func One[N interfaces.Number](t T, n N, settings ...Setting) { + t.Helper() + invoke(t, assertions.One(n), settings...) +} + +// Less asserts val < exp. +func Less[O constraints.Ordered](t T, exp, val O, settings ...Setting) { + t.Helper() + invoke(t, assertions.Less(exp, val), settings...) +} + +// LessEq asserts val ≤ exp. +func LessEq[O constraints.Ordered](t T, exp, val O, settings ...Setting) { + t.Helper() + invoke(t, assertions.LessEq(exp, val), settings...) +} + +// Greater asserts val > exp. +func Greater[O constraints.Ordered](t T, exp, val O, settings ...Setting) { + t.Helper() + invoke(t, assertions.Greater(exp, val), settings...) +} + +// GreaterEq asserts val ≥ exp. +func GreaterEq[O constraints.Ordered](t T, exp, val O, settings ...Setting) { + t.Helper() + invoke(t, assertions.GreaterEq(exp, val), settings...) +} + +// Between asserts lower ≤ val ≤ upper. +func Between[O constraints.Ordered](t T, lower, val, upper O, settings ...Setting) { + t.Helper() + invoke(t, assertions.Between(lower, val, upper), settings...) +} + +// BetweenExclusive asserts lower < val < upper. +func BetweenExclusive[O constraints.Ordered](t T, lower, val, upper O, settings ...Setting) { + t.Helper() + invoke(t, assertions.BetweenExclusive(lower, val, upper), settings...) +} + +// Ascending asserts slice[n] ≤ slice[n+1] for each element n. +func Ascending[O constraints.Ordered](t T, slice []O, settings ...Setting) { + t.Helper() + invoke(t, assertions.Ascending(slice), settings...) +} + +// AscendingFunc asserts slice[n] is less than slice[n+1] for each element n using the less comparator. +func AscendingFunc[A any](t T, slice []A, less func(A, A) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.AscendingFunc(slice, less), settings...) +} + +// AscendingLess asserts slice[n].Less(slice[n+1]) for each element n. +func AscendingLess[L interfaces.LessFunc[L]](t T, slice []L, settings ...Setting) { + t.Helper() + invoke(t, assertions.AscendingLess(slice), settings...) +} + +// Descending asserts slice[n] ≥ slice[n+1] for each element n. +func Descending[O constraints.Ordered](t T, slice []O, settings ...Setting) { + t.Helper() + invoke(t, assertions.Descending(slice), settings...) +} + +// DescendingFunc asserts slice[n+1] is less than slice[n] for each element n using the less comparator. +func DescendingFunc[A any](t T, slice []A, less func(A, A) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.DescendingFunc(slice, less), settings...) +} + +// DescendingLess asserts slice[n+1].Less(slice[n]) for each element n. +func DescendingLess[L interfaces.LessFunc[L]](t T, slice []L, settings ...Setting) { + t.Helper() + invoke(t, assertions.DescendingLess(slice), settings...) +} + +// InDelta asserts a and b are within delta of each other. +func InDelta[N interfaces.Number](t T, a, b, delta N, settings ...Setting) { + t.Helper() + invoke(t, assertions.InDelta(a, b, delta), settings...) +} + +// InDeltaSlice asserts each element a[n] is within delta of b[n]. +func InDeltaSlice[N interfaces.Number](t T, a, b []N, delta N, settings ...Setting) { + t.Helper() + invoke(t, assertions.InDeltaSlice(a, b, delta), settings...) +} + +// MapEq asserts maps exp and val contain the same key/val pairs, using +// cmp.Equal function to compare vals. +func MapEq[M1, M2 interfaces.Map[K, V], K comparable, V any](t T, exp M1, val M2, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapEq(exp, val, options(settings...)), settings...) 
+} + +// MapEqFunc asserts maps exp and val contain the same key/val pairs, using eq to +// compare vals. +func MapEqFunc[M1, M2 interfaces.Map[K, V], K comparable, V any](t T, exp M1, val M2, eq func(V, V) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapEqFunc(exp, val, eq), settings...) +} + +// MapEqual asserts maps exp and val contain the same key/val pairs, using Equal +// method to compare vals +func MapEqual[M interfaces.MapEqualFunc[K, V], K comparable, V interfaces.EqualFunc[V]](t T, exp, val M, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapEqual(exp, val), settings...) +} + +// MapLen asserts map is of size n. +func MapLen[M ~map[K]V, K comparable, V any](t T, n int, m M, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapLen(n, m), settings...) +} + +// MapEmpty asserts map is empty. +func MapEmpty[M ~map[K]V, K comparable, V any](t T, m M, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapEmpty(m), settings...) +} + +// MapNotEmpty asserts map is not empty. +func MapNotEmpty[M ~map[K]V, K comparable, V any](t T, m M, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapNotEmpty(m), settings...) +} + +// MapContainsKey asserts m contains key. +func MapContainsKey[M ~map[K]V, K comparable, V any](t T, m M, key K, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapContainsKey(m, key), settings...) +} + +// MapNotContainsKey asserts m does not contain key. +func MapNotContainsKey[M ~map[K]V, K comparable, V any](t T, m M, key K, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapNotContainsKey(m, key), settings...) +} + +// MapContainsKeys asserts m contains each key in keys. +func MapContainsKeys[M ~map[K]V, K comparable, V any](t T, m M, keys []K, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapContainsKeys(m, keys), settings...) +} + +// MapNotContainsKeys asserts m does not contain any key in keys. +func MapNotContainsKeys[M ~map[K]V, K comparable, V any](t T, m M, keys []K, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapNotContainsKeys(m, keys), settings...) +} + +// MapContainsValues asserts m contains each val in vals. +func MapContainsValues[M ~map[K]V, K comparable, V any](t T, m M, vals []V, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapContainsValues(m, vals, options(settings...)), settings...) +} + +// MapNotContainsValues asserts m does not contain any value in vals. +func MapNotContainsValues[M ~map[K]V, K comparable, V any](t T, m M, vals []V, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapNotContainsValues(m, vals, options(settings...)), settings...) +} + +// MapContainsValuesFunc asserts m contains each val in vals using the eq function. +func MapContainsValuesFunc[M ~map[K]V, K comparable, V any](t T, m M, vals []V, eq func(V, V) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapContainsValuesFunc(m, vals, eq), settings...) +} + +// MapNotContainsValuesFunc asserts m does not contain any value in vals using the eq function. +func MapNotContainsValuesFunc[M ~map[K]V, K comparable, V any](t T, m M, vals []V, eq func(V, V) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapNotContainsValuesFunc(m, vals, eq), settings...) +} + +// MapContainsValuesEqual asserts m contains each val in vals using the V.Equal method. 
+func MapContainsValuesEqual[M ~map[K]V, K comparable, V interfaces.EqualFunc[V]](t T, m M, vals []V, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapContainsValuesEqual(m, vals), settings...) +} + +// MapNotContainsValuesEqual asserts m does not contain any value in vals using the V.Equal method. +func MapNotContainsValuesEqual[M ~map[K]V, K comparable, V interfaces.EqualFunc[V]](t T, m M, vals []V, settings ...Setting) { + t.Helper() + invoke(t, assertions.MapNotContainsValuesEqual(m, vals), settings...) +} + +// FileExistsFS asserts file exists on the fs.FS filesystem. +// +// Example, +// FileExistsFS(t, os.DirFS("/etc"), "hosts") +func FileExistsFS(t T, system fs.FS, file string, settings ...Setting) { + t.Helper() + invoke(t, assertions.FileExistsFS(system, file), settings...) +} + +// FileExists asserts file exists on the OS filesystem. +func FileExists(t T, file string, settings ...Setting) { + t.Helper() + file = strings.TrimPrefix(file, "/") + invoke(t, assertions.FileExistsFS(os.DirFS(brokenfs.Root), file), settings...) +} + +// FileNotExistsFS asserts file does not exist on the fs.FS filesystem. +// +// Example, +// FileNotExist(t, os.DirFS("/bin"), "exploit.exe") +func FileNotExistsFS(t T, system fs.FS, file string, settings ...Setting) { + t.Helper() + invoke(t, assertions.FileNotExistsFS(system, file), settings...) +} + +// FileNotExists asserts file does not exist on the OS filesystem. +func FileNotExists(t T, file string, settings ...Setting) { + t.Helper() + invoke(t, assertions.FileNotExistsFS(os.DirFS(brokenfs.Root), file), settings...) +} + +// DirExistsFS asserts directory exists on the fs.FS filesystem. +// +// Example, +// DirExistsFS(t, os.DirFS("/usr/local"), "bin") +func DirExistsFS(t T, system fs.FS, directory string, settings ...Setting) { + t.Helper() + directory = strings.TrimPrefix(directory, "/") + invoke(t, assertions.DirExistsFS(system, directory), settings...) +} + +// DirExists asserts directory exists on the OS filesystem. +func DirExists(t T, directory string, settings ...Setting) { + t.Helper() + directory = strings.TrimPrefix(directory, "/") + invoke(t, assertions.DirExistsFS(os.DirFS(brokenfs.Root), directory), settings...) +} + +// DirNotExistsFS asserts directory does not exist on the fs.FS filesystem. +// +// Example, +// DirNotExistsFS(t, os.DirFS("/tmp"), "scratch") +func DirNotExistsFS(t T, system fs.FS, directory string, settings ...Setting) { + t.Helper() + invoke(t, assertions.DirNotExistsFS(system, directory), settings...) +} + +// DirNotExists asserts directory does not exist on the OS filesystem. +func DirNotExists(t T, directory string, settings ...Setting) { + t.Helper() + invoke(t, assertions.DirNotExistsFS(os.DirFS(brokenfs.Root), directory), settings...) +} + +// FileModeFS asserts the file or directory at path on fs.FS has exactly the given permission bits. +// +// Example, +// FileModeFS(t, os.DirFS("/bin"), "find", 0655) +func FileModeFS(t T, system fs.FS, path string, permissions fs.FileMode, settings ...Setting) { + t.Helper() + invoke(t, assertions.FileModeFS(system, path, permissions), settings...) +} + +// FileMode asserts the file or directory at path on the OS filesystem has exactly the given permission bits. +func FileMode(t T, path string, permissions fs.FileMode, settings ...Setting) { + t.Helper() + path = strings.TrimPrefix(path, "/") + invoke(t, assertions.FileModeFS(os.DirFS(brokenfs.Root), path, permissions), settings...) 
+} + +// FileContainsFS asserts the file on fs.FS contains content as a substring. +// +// Often os.DirFS is used to interact with the host filesystem. +// Example, +// FileContainsFS(t, os.DirFS("/etc"), "hosts", "localhost") +func FileContainsFS(t T, system fs.FS, file, content string, settings ...Setting) { + t.Helper() + invoke(t, assertions.FileContainsFS(system, file, content), settings...) +} + +// FileContains asserts the file on the OS filesystem contains content as a substring. +func FileContains(t T, file, content string, settings ...Setting) { + t.Helper() + file = strings.TrimPrefix(file, "/") + invoke(t, assertions.FileContainsFS(os.DirFS(brokenfs.Root), file, content), settings...) +} + +// FilePathValid asserts path is a valid file path. +func FilePathValid(t T, path string, settings ...Setting) { + t.Helper() + invoke(t, assertions.FilePathValid(path), settings...) +} + +// StrEqFold asserts exp and val are equivalent, ignoring case. +func StrEqFold(t T, exp, val string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrEqFold(exp, val), settings...) +} + +// StrNotEqFold asserts exp and val are not equivalent, ignoring case. +func StrNotEqFold(t T, exp, val string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrNotEqFold(exp, val), settings...) +} + +// StrContains asserts s contains substring sub. +func StrContains(t T, s, sub string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrContains(s, sub), settings...) +} + +// StrContainsFold asserts s contains substring sub, ignoring case. +func StrContainsFold(t T, s, sub string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrContainsFold(s, sub), settings...) +} + +// StrNotContains asserts s does not contain substring sub. +func StrNotContains(t T, s, sub string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrNotContains(s, sub), settings...) +} + +// StrNotContainsFold asserts s does not contain substring sub, ignoring case. +func StrNotContainsFold(t T, s, sub string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrNotContainsFold(s, sub), settings...) +} + +// StrContainsAny asserts s contains at least one character in chars. +func StrContainsAny(t T, s, chars string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrContainsAny(s, chars), settings...) +} + +// StrNotContainsAny asserts s does not contain any character in chars. +func StrNotContainsAny(t T, s, chars string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrNotContainsAny(s, chars), settings...) +} + +// StrCount asserts s contains exactly count instances of substring sub. +func StrCount(t T, s, sub string, count int, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrCount(s, sub, count), settings...) +} + +// StrContainsFields asserts that fields is a subset of the result of strings.Fields(s). +func StrContainsFields(t T, s string, fields []string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrContainsFields(s, fields), settings...) +} + +// StrHasPrefix asserts that s starts with prefix. +func StrHasPrefix(t T, prefix, s string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrHasPrefix(prefix, s), settings...) +} + +// StrNotHasPrefix asserts that s does not start with prefix. +func StrNotHasPrefix(t T, prefix, s string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrNotHasPrefix(prefix, s), settings...) +} + +// StrHasSuffix asserts that s ends with suffix. 
+func StrHasSuffix(t T, suffix, s string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrHasSuffix(suffix, s), settings...) +} + +// StrNotHasSuffix asserts that s does not end with suffix. +func StrNotHasSuffix(t T, suffix, s string, settings ...Setting) { + t.Helper() + invoke(t, assertions.StrNotHasSuffix(suffix, s), settings...) +} + +// RegexMatch asserts regular expression re matches string s. +func RegexMatch(t T, re *regexp.Regexp, s string, settings ...Setting) { + t.Helper() + invoke(t, assertions.RegexMatch(re, s), settings...) +} + +// RegexCompiles asserts expr compiles as a valid regular expression. +func RegexCompiles(t T, expr string, settings ...Setting) { + t.Helper() + invoke(t, assertions.RegexpCompiles(expr), settings...) +} + +// RegexCompilesPOSIX asserts expr compiles as a valid POSIX regular expression. +func RegexCompilesPOSIX(t T, expr string, settings ...Setting) { + t.Helper() + invoke(t, assertions.RegexpCompilesPOSIX(expr), settings...) +} + +// UUIDv4 asserts id meets the criteria of a v4 UUID. +func UUIDv4(t T, id string, settings ...Setting) { + t.Helper() + invoke(t, assertions.UUIDv4(id), settings...) +} + +// Size asserts s.Size() is equal to exp. +func Size(t T, exp int, s interfaces.SizeFunc, settings ...Setting) { + t.Helper() + invoke(t, assertions.Size(exp, s), settings...) +} + +// Length asserts l.Len() is equal to exp. +func Length(t T, exp int, l interfaces.LengthFunc, settings ...Setting) { + t.Helper() + invoke(t, assertions.Length(exp, l), settings...) +} + +// Empty asserts e.Empty() is true. +func Empty(t T, e interfaces.EmptyFunc, settings ...Setting) { + t.Helper() + invoke(t, assertions.Empty(e), settings...) +} + +// NotEmpty asserts e.Empty() is false. +func NotEmpty(t T, e interfaces.EmptyFunc, settings ...Setting) { + t.Helper() + invoke(t, assertions.NotEmpty(e), settings...) +} + +// Contains asserts container.ContainsFunc(element) is true. +func Contains[C any](t T, element C, container interfaces.ContainsFunc[C], settings ...Setting) { + t.Helper() + invoke(t, assertions.Contains(element, container), settings...) +} + +// ContainsSubset asserts each element in elements exists in container, in no particular order. +// There may be elements in container beyond what is present in elements. +func ContainsSubset[C any](t T, elements []C, container interfaces.ContainsFunc[C], settings ...Setting) { + t.Helper() + invoke(t, assertions.ContainsSubset(elements, container), settings...) +} + +// NotContains asserts container.ContainsFunc(element) is false. +func NotContains[C any](t T, element C, container interfaces.ContainsFunc[C], settings ...Setting) { + t.Helper() + invoke(t, assertions.NotContains(element, container), settings...) +} + +// Wait asserts wc. +func Wait(t T, wc *wait.Constraint, settings ...Setting) { + t.Helper() + invoke(t, assertions.Wait(wc), settings...) +} diff --git a/vendor/github.com/shoenig/test/must/scripts.go b/vendor/github.com/shoenig/test/must/scripts.go new file mode 100644 index 00000000..0bd7f05e --- /dev/null +++ b/vendor/github.com/shoenig/test/must/scripts.go @@ -0,0 +1,95 @@ +// Code generated via scripts/generate.sh. DO NOT EDIT. 
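As context for the generated wrappers above, here is a minimal usage sketch of how they are typically called from a test. The test name and values are illustrative and not part of the vendored code, and it assumes *testing.T satisfies the package's T interface.

package example_test

import (
	"os"
	"testing"

	"github.com/shoenig/test/must"
)

func TestMustWrappers(t *testing.T) {
	labels := map[string]string{"env": "dev", "team": "core"}

	// Map assertions: size and key membership.
	must.MapLen(t, 2, labels)
	must.MapContainsKey(t, labels, "env")

	// String assertions: note that StrHasPrefix takes the prefix first.
	must.StrHasPrefix(t, "de", labels["env"])
	must.StrContains(t, labels["team"], "or")

	// File assertions run against any fs.FS; os.DirFS is the usual choice.
	must.FileExistsFS(t, os.DirFS("/etc"), "hosts")
}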
+ +package must + +import ( + "fmt" + "strings" +) + +func run(posts ...PostScript) string { + s := new(strings.Builder) + for _, post := range posts { + s.WriteString("↪ PostScript | ") + s.WriteString(post.Label()) + s.WriteString(" ↷\n") + s.WriteString(post.Content()) + s.WriteString("\n") + } + return s.String() +} + +// A PostScript is used to annotate a test failure with additional information. +// +// Can be useful in large e2e style test cases, where adding additional context +// beyond an assertion helps in debugging. +type PostScript interface { + // Label should categorize what is in Content. + Label() string + + // Content contains extra contextual information for debugging a test failure. + Content() string +} + +type script struct { + label string + content string +} + +func (s *script) Label() string { + return strings.TrimSpace(s.label) +} +func (s *script) Content() string { + return "\t" + strings.TrimSpace(s.content) +} + +// Sprintf appends a Sprintf-string as an annotation to the output of a test case failure. +func Sprintf(msg string, args ...any) Setting { + return func(s *Settings) { + s.postScripts = append(s.postScripts, &script{ + label: "annotation", + content: fmt.Sprintf(msg, args...), + }) + } +} + +// Sprint appends a Sprint-string as an annotation to the output of a test case failure. +func Sprint(args ...any) Setting { + return func(s *Settings) { + s.postScripts = append(s.postScripts, &script{ + label: "annotation", + content: strings.TrimSpace(fmt.Sprintln(args...)), + }) + } +} + +// Values adds formatted key-val mappings as an annotation to the output of a test case failure. +func Values(vals ...any) Setting { + b := new(strings.Builder) + n := len(vals) + for i := 0; i < n-1; i += 2 { + s := fmt.Sprintf("\t%#v => %#v\n", vals[i], vals[i+1]) + b.WriteString(s) + } + if n%2 != 0 { + s := fmt.Sprintf("\t%v => ", vals[n-1]) + b.WriteString(s) + } + content := b.String() + return func(s *Settings) { + s.postScripts = append(s.postScripts, &script{ + label: "mapping", + content: content, + }) + } +} + +// Func adds the string produced by f as an annotation to the output of a test case failure. +func Func(f func() string) Setting { + return func(s *Settings) { + s.postScripts = append(s.postScripts, &script{ + label: "function", + content: f(), + }) + } +} diff --git a/vendor/github.com/shoenig/test/must/settings.go b/vendor/github.com/shoenig/test/must/settings.go new file mode 100644 index 00000000..45fd7dfa --- /dev/null +++ b/vendor/github.com/shoenig/test/must/settings.go @@ -0,0 +1,49 @@ +// Code generated via scripts/generate.sh. DO NOT EDIT. + +package must + +import ( + "github.com/google/go-cmp/cmp" +) + +// Settings are used to manage a collection of Setting values used to modify +// the behavior of a test case assertion. Currently supports specifying custom +// error output content, and custom cmp.Option comparators / transforms. +// +// Use Cmp for specifying custom cmp.Option values. +// +// Use Sprint, Sprintf, Values, Func for specifying custom failure output messages. +type Settings struct { + postScripts []PostScript + cmpOptions []cmp.Option +} + +// A Setting changes the behavior of a test case assertion. +type Setting func(s *Settings) + +// Cmp enables configuring cmp.Option values for modifying the behavior of the +// cmp.Equal function. Custom cmp.Option values control how the cmp.Equal function +// determines equality between the two objects. 
+// +// https://github.com/google/go-cmp/blob/master/cmp/options.go#L16 +func Cmp(options ...cmp.Option) Setting { + return func(s *Settings) { + s.cmpOptions = append(s.cmpOptions, options...) + } +} + +func options(settings ...Setting) []cmp.Option { + s := new(Settings) + for _, setting := range settings { + setting(s) + } + return s.cmpOptions +} + +func scripts(settings ...Setting) []PostScript { + s := new(Settings) + for _, setting := range settings { + setting(s) + } + return s.postScripts +} diff --git a/vendor/github.com/shoenig/test/portal/portal.go b/vendor/github.com/shoenig/test/portal/portal.go new file mode 100644 index 00000000..956c230f --- /dev/null +++ b/vendor/github.com/shoenig/test/portal/portal.go @@ -0,0 +1,118 @@ +// Package portal (Port Allocator) provides a helper for reserving free TCP ports +// across multiple processes on the same machine. This works by asking the kernel +// for available ports in the ephemeral port range. It does so by binding to an +// address with port 0 (e.g. 127.0.0.1:0), modifying the socket to disable SO_LINGER, +// close the connection, and finally return the port that was used. This *probably* +// works well, because the kernel re-uses ports in an LRU fashion, implying the +// test code asking for the ports *should* be the only thing immediately asking +// to bind that port again. +package portal + +import ( + "io" + "net" + "strconv" + "sync" +) + +const ( + defaultAddress = "127.0.0.1" +) + +type FatalTester interface { + Fatalf(msg string, args ...any) +} + +// A Grabber is used to grab open ports. +type Grabber interface { + // Grab n port allocations. + Grab(n int) []int + + // One port allocation. + One() int +} + +// New creates a new Grabber with the given options. +func New(t FatalTester, opts ...Option) Grabber { + g := &grabber{ + t: t, + ip: net.ParseIP(defaultAddress), + } + + for _, opt := range opts { + opt(g) + } + + return g +} + +type grabber struct { + t FatalTester + ip net.IP + lock sync.Mutex +} + +type Option func(Grabber) + +// WithAddress specifies which address on which to allocate ports. 
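A small usage sketch for the port grabber defined above, illustrative only and not part of the diff. It assumes *testing.T satisfies FatalTester, and the reserved port numbers are whatever the kernel hands back.

package example_test

import (
	"testing"

	"github.com/shoenig/test/portal"
)

func TestReservePorts(t *testing.T) {
	// Defaults to 127.0.0.1; WithAddress overrides the bind address.
	grabber := portal.New(t, portal.WithAddress("127.0.0.1"))

	apiPort := grabber.One() // reserve a single free port
	peers := grabber.Grab(3) // reserve three more in one call

	t.Logf("api=%d peers=%v", apiPort, peers)
}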
+func WithAddress(address string) Option { + return func(g Grabber) { + g.(*grabber).ip = net.ParseIP(address) + } +} + +func (g *grabber) Grab(n int) []int { + g.lock.Lock() + defer g.lock.Unlock() + + ports := make([]int, n) + closers := make([]io.Closer, n) + + for i := 0; i < n; i++ { + p, c := g.one() + ports[i] = p + closers[i] = c + } + + for _, c := range closers { + _ = c.Close() + } + + return ports +} + +func (g *grabber) One() int { + g.lock.Lock() + defer g.lock.Unlock() + + p, c := g.one() + _ = c.Close() + return p +} + +// one will acquire one port; the caller must hold the lock and also close +// the returned listener - this minimized the chances of reallocating the same +// port +func (g *grabber) one() (int, io.Closer) { + tcpAddr := &net.TCPAddr{IP: g.ip, Port: 0} + l, listenErr := net.ListenTCP("tcp", tcpAddr) + if listenErr != nil { + g.t.Fatalf("failed to acquire port: %v", listenErr) + } + + if setErr := setSocketOpt(l); setErr != nil { + g.t.Fatalf("failed to modify socket: %v", setErr) + } + + _, port, splitErr := net.SplitHostPort(l.Addr().String()) + if splitErr != nil { + g.t.Fatalf("failed to parse address: %v", splitErr) + } + + p, parseErr := strconv.Atoi(port) + if parseErr != nil { + g.t.Fatalf("failed to parse port: %v", parseErr) + } + + return p, l +} diff --git a/vendor/github.com/shoenig/test/portal/portal_default.go b/vendor/github.com/shoenig/test/portal/portal_default.go new file mode 100644 index 00000000..4df2b314 --- /dev/null +++ b/vendor/github.com/shoenig/test/portal/portal_default.go @@ -0,0 +1,29 @@ +//go:build !windows + +package portal + +import ( + "fmt" + "net" + "syscall" +) + +func setSocketOpt(l *net.TCPListener) error { + f, fileErr := l.File() + if fileErr != nil { + return fmt.Errorf("failed to open socket file: %w", fileErr) + } + + h := int(f.Fd()) + setErr := syscall.SetsockoptLinger(h, syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 0, Linger: 0}) + if setErr != nil { + return fmt.Errorf("failed to set linger option: %w", setErr) + } + + closeErr := f.Close() + if closeErr != nil { + return fmt.Errorf("failed to close socket file: %w", closeErr) + } + + return nil +} diff --git a/vendor/github.com/shoenig/test/portal/portal_windows.go b/vendor/github.com/shoenig/test/portal/portal_windows.go new file mode 100644 index 00000000..2b38a8a8 --- /dev/null +++ b/vendor/github.com/shoenig/test/portal/portal_windows.go @@ -0,0 +1,12 @@ +//go:build windows + +package portal + +import ( + "net" +) + +func setSocketOpt(l *net.TCPListener) error { + // windows does not support modifying the socket; good luck! + return nil +} diff --git a/vendor/github.com/shoenig/test/wait/wait.go b/vendor/github.com/shoenig/test/wait/wait.go new file mode 100644 index 00000000..d0c44d92 --- /dev/null +++ b/vendor/github.com/shoenig/test/wait/wait.go @@ -0,0 +1,431 @@ +// Package wait provides constructs for waiting on conditionals within specified constraints. +package wait + +import ( + "context" + "errors" + "fmt" + "math" + "time" +) + +var ( + ErrTimeoutExceeded = errors.New("wait: timeout exceeded") + ErrAttemptsExceeded = errors.New("wait: attempts exceeded") + ErrConditionUnsatisfied = errors.New("wait: condition unsatisfied") + ErrNoFunction = errors.New("wait: no function specified") +) + +const ( + defaultTimeout = 3 * time.Second + defaultGap = 250 * time.Millisecond +) + +// A Constraint is something a test assertion can wait on before marking the +// result to be a failure. 
A Constraint is used in conjunction with either the +// InitialSuccess or ContinualSuccess option. A call to Run will execute the given +// function, returning nil or error depending on the Constraint configuration and +// the results of the function. +// +// InitialSuccess - retry a function until it returns a positive result. If the +// function never returns a positive result before the Constraint threshold is +// exceeded, an error is returned from Run(). +// +// ContinualSuccess - retry a function asserting it returns a positive result until +// the Constraint threshold is exceeded. If at any point the function returns a +// negative result, an error is returned from Run(). +// +// A Constraint threshold is configured via either Timeout or Attempts (not both). +// +// Timeout - Constraint is time bound. +// +// Attempts - Constraint is iteration bound. +// +// The use of Gap controls the pace of attempts by setting the amount of time to +// wait in between each attempt. +type Constraint struct { + continual bool // (initial || continual) success + now time.Time + deadline time.Time + gap time.Duration + iterations int + r runnable +} + +// InitialSuccess creates a new Constraint configured by opts that will wait for a +// positive result upon calling Constraint.Run. If the threshold of the Constraint +// is exceeded before reaching a positive result, an error is returned from the +// call to Constraint.Run. +// +// Timeout is used to set a maximum amount of time to wait for success. +// Attempts is used to set a maximum number of attempts to wait for success. +// Gap is used to control the amount of time to wait between retries. +// +// One of ErrorFunc, BoolFunc, or TestFunc represents the function that will +// be run under the constraint. +func InitialSuccess(opts ...Option) *Constraint { + c := &Constraint{now: time.Now()} + c.setup(opts...) + return c +} + +// ContinualSuccess creates a new Constraint configured by opts that will assert +// a positive result upon calling Constraint.Run, repeating the call until the +// Constraint reaches its threshold. If the result is negative, an error is +// returned from the call to Constraint.Run. +// +// Timeout is used to set the amount of time to assert success. +// Attempts is used to set the number of iterations to assert success. +// Gap is used to control the amount of time to wait between iterations. +// +// One of ErrorFunc, BoolFunc, or TestFunc represents the function that will +// be run under the constraint. +func ContinualSuccess(opts ...Option) *Constraint { + c := &Constraint{now: time.Now(), continual: true} + c.setup(opts...) + return c +} + +// Timeout sets a time bound on a Constraint. +// +// If set, the Attempts constraint configuration is disabled. +// +// Default 3 seconds. +func Timeout(duration time.Duration) Option { + return func(c *Constraint) { + c.deadline = time.Now().Add(duration) + c.iterations = math.MaxInt + } +} + +// Attempts sets an iteration bound on a Constraint. +// +// If set, the Timeout constraint configuration is disabled. +// +// By default a Timeout constraint is set and the Attempts bound is disabled. +func Attempts(max int) Option { + return func(c *Constraint) { + c.iterations = max + c.deadline = time.Date(9999, 0, 0, 0, 0, 0, 0, time.UTC) + } +} + +// Gap sets the amount of time to wait between attempts. +// +// Default 250 milliseconds. 
+func Gap(duration time.Duration) Option { + return func(c *Constraint) { + c.gap = duration + } +} + +// BoolFunc executes f under the thresholds of a Constraint. +func BoolFunc(f func() bool) Option { + return func(c *Constraint) { + if c.continual { + c.r = boolFuncContinual(f) + } else { + c.r = boolFuncInitial(f) + } + } +} + +// Option is used to configure a Constraint. +// +// Understood Option functions include Timeout, Attempts, Gap, InitialSuccess, +// and ContinualSuccess. +type Option func(*Constraint) + +type runnable func(*runner) *result + +type runner struct { + c *Constraint + attempts int +} + +type result struct { + Err error +} + +func boolFuncContinual(f func() bool) runnable { + bg := context.Background() + return func(r *runner) *result { + ctx, cancel := context.WithDeadline(bg, r.c.deadline) + defer cancel() + + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // make an attempt + if !f() { + return &result{Err: ErrConditionUnsatisfied} + } + + // used another attempt + r.attempts++ + + // reached the desired attempts + if r.attempts >= r.c.iterations { + return &result{Err: nil} + } + + // reset timer to gap interval + timer.Reset(r.c.gap) + + // wait for gap or time + select { + case <-ctx.Done(): + return &result{Err: nil} + case <-timer.C: + // continue + } + } + } +} + +func boolFuncInitial(f func() bool) runnable { + bg := context.Background() + return func(r *runner) *result { + ctx, cancel := context.WithDeadline(bg, r.c.deadline) + defer cancel() + + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // make an attempt + if f() { + return &result{Err: nil} + } + + // used another attempt + r.attempts++ + + // check iterations + if r.attempts > r.c.iterations { + return &result{Err: ErrAttemptsExceeded} + } + + // reset timer to gap interval + timer.Reset(r.c.gap) + + // wait for gap or timeout + select { + case <-ctx.Done(): + return &result{Err: ErrTimeoutExceeded} + case <-timer.C: + // continue + } + } + } +} + +// ErrorFunc will retry f while it returns a non-nil error, or until a wait +// constraint threshold is exceeded. 
+func ErrorFunc(f func() error) Option { + return func(c *Constraint) { + if c.continual { + c.r = errFuncContinual(f) + } else { + c.r = errFuncInitial(f) + } + } +} + +func errFuncContinual(f func() error) runnable { + bg := context.Background() + return func(r *runner) *result { + ctx, cancel := context.WithDeadline(bg, r.c.deadline) + defer cancel() + + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // make an attempt + if err := f(); err != nil { + return &result{Err: err} + } + + // used another attempt + r.attempts++ + + // reached the desired attempts + if r.attempts >= r.c.iterations { + return &result{Err: nil} + } + + // reset timer to gap interval + timer.Reset(r.c.gap) + + // wait for gap or time + select { + case <-ctx.Done(): + return &result{Err: nil} + case <-timer.C: + // continue + } + } + } +} + +func errFuncInitial(f func() error) runnable { + bg := context.Background() + return func(r *runner) *result { + ctx, cancel := context.WithDeadline(bg, r.c.deadline) + defer cancel() + + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // make an attempt + err := f() + if err == nil { + return &result{Err: nil} + } + + // used another attempt + r.attempts++ + + // check iterations + if r.attempts > r.c.iterations { + return &result{ + Err: fmt.Errorf("%v: %w", ErrAttemptsExceeded, err), + } + } + + // reset timer to gap interval + timer.Reset(r.c.gap) + + // wait for gap or timeout + select { + case <-ctx.Done(): + return &result{ + Err: fmt.Errorf("%v: %w", ErrTimeoutExceeded, err), + } + case <-timer.C: + // continue + } + } + } +} + +// TestFunc will retry f while it returns false, or until a wait constraint +// threshold is exceeded. If f never succeeds, the latest returned error is +// wrapped into the result. 
+func TestFunc(f func() (bool, error)) Option { + return func(c *Constraint) { + if c.continual { + c.r = testFuncContinual(f) + } else { + c.r = testFuncInitial(f) + } + } +} + +func testFuncContinual(f func() (bool, error)) runnable { + bg := context.Background() + return func(r *runner) *result { + ctx, cancel := context.WithDeadline(bg, r.c.deadline) + defer cancel() + + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // make an attempt + ok, err := f() + if !ok { + return &result{Err: fmt.Errorf("%v: %w", ErrConditionUnsatisfied, err)} + } + + // used another attempt + r.attempts++ + + // reached the desired attempts + if r.attempts >= r.c.iterations { + return &result{Err: nil} + } + + // reset timer to gap interval + timer.Reset(r.c.gap) + + // wait for gap or time + select { + case <-ctx.Done(): + return &result{Err: nil} + case <-timer.C: + // continue + } + } + } +} + +func testFuncInitial(f func() (bool, error)) runnable { + bg := context.Background() + return func(r *runner) *result { + ctx, cancel := context.WithDeadline(bg, r.c.deadline) + defer cancel() + + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // make an attempt + ok, err := f() + if ok { + return &result{Err: nil} + } + + // set default error + if err == nil { + err = ErrConditionUnsatisfied + } + + // used another attempt + r.attempts++ + + // check iterations + if r.attempts > r.c.iterations { + return &result{ + Err: fmt.Errorf("%v: %w", ErrAttemptsExceeded, err), + } + } + + // reset timer to gap interval + timer.Reset(r.c.gap) + + // wait for gap or timeout + select { + case <-ctx.Done(): + return &result{ + Err: fmt.Errorf("%v: %w", ErrTimeoutExceeded, err), + } + case <-timer.C: + // continue + } + } + } +} + +func (c *Constraint) setup(opts ...Option) { + for _, opt := range append([]Option{ + Timeout(defaultTimeout), + Gap(defaultGap), + }, opts...) { + opt(c) + } +} + +// Run the Constraint and produce an error result. +func (c *Constraint) Run() error { + if c.r == nil { + return ErrNoFunction + } + return c.r(&runner{ + c: c, + attempts: 0, + }).Err +} diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index c7d5293a..6be2c354 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,22 +1,23 @@ env: CIRRUS_CLONE_DEPTH: 1 + GO_VERSION: go1.19.1 freebsd_12_task: freebsd_instance: image_family: freebsd-12-3 install_script: | - pkg install -y git go - GOBIN=$PWD/bin go install golang.org/dl/go1.17.7@latest - bin/go1.17.7 download - build_script: bin/go1.17.7 build -v ./... - test_script: bin/go1.17.7 test -race ./... + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... freebsd_13_task: freebsd_instance: image_family: freebsd-13-0 install_script: | - pkg install -y git go - GOBIN=$PWD/bin go install golang.org/dl/go1.17.7@latest - bin/go1.17.7 download - build_script: bin/go1.17.7 build -v ./... - test_script: bin/go1.17.7 test -race ./... + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... 
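To tie the wait package above back to the must assertions earlier in this patch, a minimal sketch (not part of the vendored code) of polling a condition with InitialSuccess; ready() is a hypothetical stand-in for whatever a test waits on.

package example_test

import (
	"testing"
	"time"

	"github.com/shoenig/test/must"
	"github.com/shoenig/test/wait"
)

func TestEventuallyReady(t *testing.T) {
	attempts := 0
	ready := func() bool {
		attempts++
		return attempts >= 3 // pretend the condition passes on the third poll
	}

	// Retry ready() until it returns true, giving up after 2 seconds and
	// pausing 100ms between attempts.
	must.Wait(t, wait.InitialSuccess(
		wait.BoolFunc(ready),
		wait.Timeout(2*time.Second),
		wait.Gap(100*time.Millisecond),
	))
}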
diff --git a/vendor/github.com/tklauser/go-sysconf/LICENSE b/vendor/github.com/tklauser/go-sysconf/LICENSE index cf198deb..73c6b899 100644 --- a/vendor/github.com/tklauser/go-sysconf/LICENSE +++ b/vendor/github.com/tklauser/go-sysconf/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2018-2021, Tobias Klauser +Copyright (c) 2018-2022, Tobias Klauser All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go new file mode 100644 index 00000000..b7cff760 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index 11a39e21..53c0110b 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,12 +1,13 @@ env: CIRRUS_CLONE_DEPTH: 1 + GO_VERSION: go1.19.1 freebsd_12_task: freebsd_instance: image_family: freebsd-12-3 install_script: | - pkg install -y git go - GOBIN=$PWD/bin go install golang.org/dl/go1.17.6@latest - bin/go1.17.6 download - build_script: bin/go1.17.6 build -v ./... - test_script: bin/go1.17.6 test -race ./... + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -buildvcs=false -v ./... + test_script: bin/${GO_VERSION} test -buildvcs=false -race ./... diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go index ededc5f3..541b9a49 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -43,14 +43,14 @@ func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { out = out.WithoutOptionalAttributesDeep() if !isKnown { - return cty.UnknownVal(out), nil + return cty.UnknownVal(dynamicReplace(in.Type(), out)), nil } if isNull { // We'll pass through nulls, albeit type converted, and let // the caller deal with whatever handling they want to do in // case null values are considered valid in some applications. 
- return cty.NullVal(out), nil + return cty.NullVal(dynamicReplace(in.Type(), out)), nil } } diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go index e70b0184..05399c9a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go @@ -39,6 +39,11 @@ func conversionCollectionToList(ety cty.Type, conv conversion) conversion { return cty.NilVal, err } } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + elems = append(elems, val) i++ @@ -50,7 +55,7 @@ func conversionCollectionToList(ety cty.Type, conv conversion) conversion { if ety == cty.DynamicPseudoType { return cty.ListValEmpty(val.Type().ElementType()), nil } - return cty.ListValEmpty(ety), nil + return cty.ListValEmpty(ety.WithoutOptionalAttributesDeep()), nil } if !cty.CanListVal(elems) { @@ -88,6 +93,11 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { return cty.NilVal, err } } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + elems = append(elems, val) i++ @@ -99,7 +109,7 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { if ety == cty.DynamicPseudoType { return cty.SetValEmpty(val.Type().ElementType()), nil } - return cty.SetValEmpty(ety), nil + return cty.SetValEmpty(ety.WithoutOptionalAttributesDeep()), nil } if !cty.CanSetVal(elems) { @@ -180,7 +190,7 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv if len(tupleEtys) == 0 { // Empty tuple short-circuit return func(val cty.Value, path cty.Path) (cty.Value, error) { - return cty.SetValEmpty(setEty), nil + return cty.SetValEmpty(setEty.WithoutOptionalAttributesDeep()), nil } } @@ -242,6 +252,11 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv return cty.NilVal, err } } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + elems = append(elems, val) i++ @@ -265,7 +280,7 @@ func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) co if len(tupleEtys) == 0 { // Empty tuple short-circuit return func(val cty.Value, path cty.Path) (cty.Value, error) { - return cty.ListValEmpty(listEty), nil + return cty.ListValEmpty(listEty.WithoutOptionalAttributesDeep()), nil } } @@ -357,7 +372,7 @@ func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) co if len(objectAtys) == 0 { // Empty object short-circuit return func(val cty.Value, path cty.Path) (cty.Value, error) { - return cty.MapValEmpty(mapEty), nil + return cty.MapValEmpty(mapEty.WithoutOptionalAttributesDeep()), nil } } @@ -448,13 +463,28 @@ func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conv elemConvs[name] = getConversion(mapEty, objectAty, unsafe) if elemConvs[name] == nil { - // If any of our element conversions are impossible, then the our - // whole conversion is impossible. + // This means that this conversion is impossible. Typically, we + // would give up at this point and declare the whole conversion + // impossible. But, if this attribute is optional then maybe we will + // be able to do this conversion anyway provided the actual concrete + // map doesn't have this value set. + // + // We only do this in "unsafe" mode, because we cannot guarantee + // that the returned conversion will actually succeed once applied. 
+ if objType.AttributeOptional(name) && unsafe { + // This attribute is optional, so let's leave this conversion in + // as a nil, and we can error later if we actually have to + // convert this. + continue + } + + // Otherwise, give up. This conversion is impossible as we have a + // required attribute that doesn't match the map's inner type. return nil } } - // If we fall out here then a conversion is possible, using the + // If we fall out here then a conversion may be possible, using the // element conversions in elemConvs return func(val cty.Value, path cty.Path) (cty.Value, error) { elems := make(map[string]cty.Value, len(elemConvs)) @@ -474,12 +504,43 @@ func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conv Key: name, } - conv := elemConvs[name.AsString()] - if conv != nil { + // There are 3 cases here: + // 1. This attribute is not in elemConvs + // 2. This attribute is in elemConvs and is not nil + // 3. This attribute is in elemConvs and is nil. + + // In case 1, we do not enter any of the branches below. This case + // means the attribute type is the same between the map and the + // object, and we don't need to do any conversion. + + if conv, ok := elemConvs[name.AsString()]; conv != nil { + // This is case 2. The attribute type is different between the + // map and the object, and we know how to convert between them. + // So, we reset val to be the converted value and carry on. val, err = conv(val, elemPath) if err != nil { return cty.NilVal, err } + } else if ok { + // This is case 3 and it is an error. The attribute types are + // different between the map and the object, but we cannot + // convert between them. + // + // Now typically, this would be picked earlier on when we were + // building elemConvs. However, in the case of optional + // attributes there was a chance we could still convert the + // overall object even if this particular attribute was not + // convertable. This is because it could have not been set in + // the map, and we could skip over it here and set a null value. + // + // Since we reached this branch, we know that map did actually + // contain a non-convertable optional attribute. This means we + // error. + return cty.NilVal, path.NewErrorf("map element type is incompatible with attribute %q: %s", name.AsString(), MismatchMessage(val.Type(), objType.AttributeType(name.AsString()))) + } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) } elems[name.AsString()] = val diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go index 4d19cf6c..3b554e01 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go @@ -31,3 +31,107 @@ func dynamicFixup(wantType cty.Type) conversion { func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { return in, nil } + +// dynamicReplace aims to return the out type unchanged, but if it finds a +// dynamic type either directly or in any descendent elements it replaces them +// with the equivalent type from in. +// +// This function assumes that in and out are compatible from a Convert +// perspective, and will panic if it finds that they are not. For example if +// in is an object and out is a map, this function will still attempt to iterate +// through both as if they were the same. 
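The optional-attribute handling changed above can be seen end to end with a small sketch (not from the patch): converting an object that omits an optional attribute fills it with a null, and the optional markers are stripped from the resulting type.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// An object type where "port" is an optional attribute.
	want := cty.ObjectWithOptionalAttrs(map[string]cty.Type{
		"host": cty.String,
		"port": cty.Number,
	}, []string{"port"})

	// The input value does not set "port" at all.
	in := cty.ObjectVal(map[string]cty.Value{
		"host": cty.StringVal("example.com"),
	})

	out, err := convert.Convert(in, want)
	if err != nil {
		panic(err)
	}

	fmt.Println(out.GetAttr("port").IsNull())                            // expected: true, missing optional attribute becomes null
	fmt.Println(out.Type().Equals(want.WithoutOptionalAttributesDeep())) // expected: true, optional markers are stripped
}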
+func dynamicReplace(in, out cty.Type) cty.Type { + if in == cty.DynamicPseudoType || in == cty.NilType { + // Short circuit this case, there's no point worrying about this if in + // is a dynamic type or a nil type. Out is the best we can do. + return out + } + + switch { + case out == cty.DynamicPseudoType: + // So replace out with in. + return in + case out.IsPrimitiveType(), out.IsCapsuleType(): + // out is not dynamic and it doesn't contain descendent elements so just + // return it unchanged. + return out + case out.IsMapType(): + var elemType cty.Type + + // Maps are compatible with other maps or objects. + if in.IsMapType() { + elemType = dynamicReplace(in.ElementType(), out.ElementType()) + } + + if in.IsObjectType() { + var types []cty.Type + for _, t := range in.AttributeTypes() { + types = append(types, t) + } + unifiedType, _ := unify(types, true) + elemType = dynamicReplace(unifiedType, out.ElementType()) + } + + return cty.Map(elemType) + case out.IsObjectType(): + // Objects are compatible with other objects and maps. + outTypes := map[string]cty.Type{} + if in.IsMapType() { + for attr, attrType := range out.AttributeTypes() { + outTypes[attr] = dynamicReplace(in.ElementType(), attrType) + } + } + + if in.IsObjectType() { + for attr, attrType := range out.AttributeTypes() { + if !in.HasAttribute(attr) { + // If in does not have this attribute, then it is an + // optional attribute and there is nothing we can do except + // to return the type from out even if it is dynamic. + outTypes[attr] = attrType + continue + } + outTypes[attr] = dynamicReplace(in.AttributeType(attr), attrType) + } + } + + return cty.Object(outTypes) + case out.IsSetType(): + var elemType cty.Type + + // Sets are compatible with other sets, lists, tuples. + if in.IsSetType() || in.IsListType() { + elemType = dynamicReplace(in.ElementType(), out.ElementType()) + } + + if in.IsTupleType() { + unifiedType, _ := unify(in.TupleElementTypes(), true) + elemType = dynamicReplace(unifiedType, out.ElementType()) + } + + return cty.Set(elemType) + case out.IsListType(): + var elemType cty.Type + + // Lists are compatible with other lists, sets, and tuples. + if in.IsSetType() || in.IsListType() { + elemType = dynamicReplace(in.ElementType(), out.ElementType()) + } + + if in.IsTupleType() { + unifiedType, _ := unify(in.TupleElementTypes(), true) + elemType = dynamicReplace(unifiedType, out.ElementType()) + } + + return cty.List(elemType) + case out.IsTupleType(): + // Tuples are only compatible with other tuples + var types []cty.Type + for ix := 0; ix < len(out.TupleElementTypes()); ix++ { + types = append(types, dynamicReplace(in.TupleElementType(ix), out.TupleElementType(ix))) + } + return cty.Tuple(types) + default: + panic("unrecognized type " + out.FriendlyName()) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go index 098c109b..51958ef4 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go @@ -80,13 +80,19 @@ func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { } } + if val.IsNull() { + // Strip optional attributes out of the embedded type for null + // values. 
+ val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + attrVals[name] = val } for name := range outOptionals { if _, exists := attrVals[name]; !exists { wantTy := outAtys[name] - attrVals[name] = cty.NullVal(wantTy) + attrVals[name] = cty.NullVal(wantTy.WithoutOptionalAttributesDeep()) } } diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go index af19bdc5..aab0d0ec 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/public.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -40,7 +40,7 @@ func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion { // This is a convenience wrapper around calling GetConversionUnsafe and then // immediately passing the given value to the resulting function. func Convert(in cty.Value, want cty.Type) (cty.Value, error) { - if in.Type().Equals(want) { + if in.Type().Equals(want.WithoutOptionalAttributesDeep()) { return in, nil } diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go index 5a26c275..61a1cf97 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/argument.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go @@ -10,6 +10,9 @@ type Parameter struct { // value, but callers may use it for documentation, etc. Name string + // Description is an optional description for the argument. + Description string + // A type that any argument for this parameter must conform to. // cty.DynamicPseudoType can be used, either at top-level or nested // in a parameterized type, to indicate that any type should be diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go index c00a0e7f..c4d99f6c 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/function.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -14,6 +14,9 @@ type Function struct { // Spec is the specification of a function, used to instantiate // a new Function. type Spec struct { + // Description is an optional description for the function specification. + Description string + // Params is a description of the positional parameters for the function. // The standard checking logic rejects any calls that do not provide // arguments conforming to this definition, freeing the function @@ -344,3 +347,62 @@ func (f Function) VarParam() *Parameter { ret := *f.spec.VarParam return &ret } + +// Description returns a human-readable description of the function. +func (f Function) Description() string { + return f.spec.Description +} + +// WithNewDescriptions returns a new function that has the same signature +// and implementation as the receiver but has the function description and +// the parameter descriptions replaced with those given in the arguments. +// +// All descriptions may be given as an empty string to specify that there +// should be no description at all. +// +// The paramDescs argument must match the number of parameters +// the reciever expects, or this function will panic. If the function has a +// VarParam then that counts as one parameter for the sake of this rule. The +// given descriptions will be assigned in order starting with the positional +// arguments in their declared order, followed by the variadic parameter if +// any. 
+// +// As a special case, WithNewDescriptions will accept a paramDescs which +// does not cover the reciever's variadic parameter (if any), so that it's +// possible to add a variadic parameter to a function which didn't previously +// have one without that being a breaking change for an existing caller using +// WithNewDescriptions against that function. In this case the base description +// of the variadic parameter will be preserved. +func (f Function) WithNewDescriptions(funcDesc string, paramDescs []string) Function { + retSpec := *f.spec // shallow copy of the reciever + retSpec.Description = funcDesc + + retSpec.Params = make([]Parameter, len(f.spec.Params)) + copy(retSpec.Params, f.spec.Params) // shallow copy of positional parameters + if f.spec.VarParam != nil { + retVarParam := *f.spec.VarParam // shallow copy of variadic parameter + retSpec.VarParam = &retVarParam + } + + if retSpec.VarParam != nil { + if with, without := len(retSpec.Params)+1, len(retSpec.Params); len(paramDescs) != with && len(paramDescs) != without { + panic(fmt.Sprintf("paramDescs must have length of either %d or %d", with, without)) + } + } else { + if want := len(retSpec.Params); len(paramDescs) != want { + panic(fmt.Sprintf("paramDescs must have length %d", want)) + } + } + + posParamDescs := paramDescs[:len(retSpec.Params)] + varParamDescs := paramDescs[len(retSpec.Params):] // guaranteed to be zero or one elements because of the rules above + + for i, desc := range posParamDescs { + retSpec.Params[i].Description = desc + } + for _, desc := range varParamDescs { + retSpec.VarParam.Description = desc + } + + return New(&retSpec) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go index 4f1ecc8d..8192d8ce 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go @@ -6,6 +6,7 @@ import ( ) var NotFunc = function.New(&function.Spec{ + Description: `Applies the logical NOT operation to the given boolean value.`, Params: []function.Parameter{ { Name: "val", @@ -21,6 +22,7 @@ var NotFunc = function.New(&function.Spec{ }) var AndFunc = function.New(&function.Spec{ + Description: `Applies the logical AND operation to the given boolean values.`, Params: []function.Parameter{ { Name: "a", @@ -42,6 +44,7 @@ var AndFunc = function.New(&function.Spec{ }) var OrFunc = function.New(&function.Spec{ + Description: `Applies the logical OR operation to the given boolean values.`, Params: []function.Parameter{ { Name: "a", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go index a132e0cd..3fe600ff 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go @@ -30,6 +30,7 @@ func BytesVal(buf []byte) cty.Value { // BytesLen is a Function that returns the length of the buffer encapsulated // in a Bytes value. var BytesLenFunc = function.New(&function.Spec{ + Description: `Returns the total number of bytes in the given buffer.`, Params: []function.Parameter{ { Name: "buf", @@ -46,6 +47,7 @@ var BytesLenFunc = function.New(&function.Spec{ // BytesSlice is a Function that returns a slice of the given Bytes value. 
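A short sketch (not part of the diff) of the new description plumbing added above: reading the Description of a stdlib function and overriding it with WithNewDescriptions. The replacement strings here are made up for illustration.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Description set by this patch on the stdlib NOT function.
	fmt.Println(stdlib.NotFunc.Description())

	// Replace the function and parameter descriptions; NotFunc has exactly
	// one parameter, so paramDescs must have length one.
	negate := stdlib.NotFunc.WithNewDescriptions(
		"Logical negation.",
		[]string{"The boolean value to negate."},
	)
	fmt.Println(negate.Description())

	// The behaviour is unchanged: NOT true is false.
	v, err := negate.Call([]cty.Value{cty.True})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.RawEquals(cty.False)) // expected: true
}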
var BytesSliceFunc = function.New(&function.Spec{ + Description: `Extracts a subslice from the given buffer.`, Params: []function.Parameter{ { Name: "buf", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go index a91821e9..0573e74e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -12,6 +12,7 @@ import ( ) var HasIndexFunc = function.New(&function.Spec{ + Description: `Returns true if if the given collection can be indexed with the given key without producing an error, or false otherwise.`, Params: []function.Parameter{ { Name: "collection", @@ -37,6 +38,7 @@ var HasIndexFunc = function.New(&function.Spec{ }) var IndexFunc = function.New(&function.Spec{ + Description: `Returns the element with the given key from the given collection, or raises an error if there is no such element.`, Params: []function.Parameter{ { Name: "collection", @@ -106,6 +108,7 @@ var IndexFunc = function.New(&function.Spec{ }) var LengthFunc = function.New(&function.Spec{ + Description: `Returns the number of elements in the given collection.`, Params: []function.Parameter{ { Name: "collection", @@ -127,6 +130,7 @@ var LengthFunc = function.New(&function.Spec{ }) var ElementFunc = function.New(&function.Spec{ + Description: `Returns the element with the given index from the given list or tuple, applying the modulo operation to the given index if it's greater than the number of elements.`, Params: []function.Parameter{ { Name: "list", @@ -206,9 +210,11 @@ var ElementFunc = function.New(&function.Spec{ // CoalesceListFunc is a function that takes any number of list arguments // and returns the first one that isn't empty. var CoalesceListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the first of the given sequences that has a length greater than zero.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "vals", + Description: `List or tuple values to test in the given order.`, Type: cty.DynamicPseudoType, AllowUnknown: true, AllowDynamicType: true, @@ -270,6 +276,7 @@ var CoalesceListFunc = function.New(&function.Spec{ // CompactFunc is a function that takes a list of strings and returns a new list // with any empty string elements removed. var CompactFunc = function.New(&function.Spec{ + Description: `Removes all empty string elements from the given list of strings.`, Params: []function.Parameter{ { Name: "list", @@ -306,6 +313,7 @@ var CompactFunc = function.New(&function.Spec{ // ContainsFunc is a function that determines whether a given list or // set contains a given single value as one of its elements. var ContainsFunc = function.New(&function.Spec{ + Description: `Returns true if the given value is a value in the given list, tuple, or set, or false otherwise.`, Params: []function.Parameter{ { Name: "list", @@ -364,6 +372,7 @@ var ContainsFunc = function.New(&function.Spec{ // DistinctFunc is a function that takes a list and returns a new list // with any duplicate elements removed. 
var DistinctFunc = function.New(&function.Spec{ + Description: `Removes any duplicate values from the given list, preserving the order of remaining elements.`, Params: []function.Parameter{ { Name: "list", @@ -399,14 +408,17 @@ var DistinctFunc = function.New(&function.Spec{ // ChunklistFunc is a function that splits a single list into fixed-size chunks, // returning a list of lists. var ChunklistFunc = function.New(&function.Spec{ + Description: `Splits a single list into multiple lists where each has at most the given number of elements.`, Params: []function.Parameter{ { Name: "list", + Description: `The list to split into chunks.`, Type: cty.List(cty.DynamicPseudoType), AllowMarked: true, }, { Name: "size", + Description: `The maximum length of each chunk. All but the last element of the result is guaranteed to be of exactly this size.`, Type: cty.Number, AllowMarked: true, }, @@ -471,6 +483,7 @@ var ChunklistFunc = function.New(&function.Spec{ // FlattenFunc is a function that takes a list and replaces any elements // that are lists with a flattened sequence of the list contents. var FlattenFunc = function.New(&function.Spec{ + Description: `Transforms a list, set, or tuple value into a tuple by replacing any given elements that are themselves sequences with a flattened tuple of all of the nested elements concatenated together.`, Params: []function.Parameter{ { Name: "list", @@ -567,9 +580,11 @@ func flattener(flattenList cty.Value) ([]cty.Value, []cty.ValueMarks, bool) { // KeysFunc is a function that takes a map and returns a sorted list of the map keys. var KeysFunc = function.New(&function.Spec{ + Description: `Returns a list of the keys of the given map in lexicographical order.`, Params: []function.Parameter{ { Name: "inputMap", + Description: `The map to extract keys from. May instead be an object-typed value, in which case the result is a tuple of the object attributes.`, Type: cty.DynamicPseudoType, AllowUnknown: true, AllowMarked: true, @@ -642,6 +657,7 @@ var KeysFunc = function.New(&function.Spec{ // LookupFunc is a function that performs dynamic lookups of map types. var LookupFunc = function.New(&function.Spec{ + Description: `Returns the value of the element with the given key from the given map, or returns the default value if there is no such element.`, Params: []function.Parameter{ { Name: "inputMap", @@ -734,7 +750,8 @@ var LookupFunc = function.New(&function.Spec{ // If more than one given map or object defines the same key then the one that // is later in the argument sequence takes precedence. var MergeFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Merges all of the elements from the given maps into a single map, or the attributes from given objects into a single object.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "maps", Type: cty.DynamicPseudoType, @@ -850,6 +867,7 @@ var MergeFunc = function.New(&function.Spec{ // ReverseListFunc takes a sequence and produces a new sequence of the same length // with all of the same elements as the given sequence but in reverse order. var ReverseListFunc = function.New(&function.Spec{ + Description: `Returns the given list with its elements in reverse order.`, Params: []function.Parameter{ { Name: "list", @@ -898,9 +916,11 @@ var ReverseListFunc = function.New(&function.Spec{ // preserving the ordering of all of the input lists. Otherwise the result is a // set of tuples. 
var SetProductFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Calculates the cartesian product of two or more sets.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "sets", + Description: "The sets to consider. Also accepts lists and tuples, and if all arguments are of list or tuple type then the result will preserve the input ordering", Type: cty.DynamicPseudoType, AllowMarked: true, }, @@ -1038,6 +1058,7 @@ var SetProductFunc = function.New(&function.Spec{ // SliceFunc is a function that extracts some consecutive elements // from within a list. var SliceFunc = function.New(&function.Spec{ + Description: `Extracts a subslice of the given list or tuple value.`, Params: []function.Parameter{ { Name: "list", @@ -1159,9 +1180,10 @@ func sliceIndexes(args []cty.Value) (int, int, bool, error) { // ValuesFunc is a function that returns a list of the map values, // in the order of the sorted keys. var ValuesFunc = function.New(&function.Spec{ + Description: `Returns the values of elements of a given map, or the values of attributes of a given object, in lexicographic order by key or attribute name.`, Params: []function.Parameter{ { - Name: "values", + Name: "mapping", Type: cty.DynamicPseudoType, AllowMarked: true, }, @@ -1226,6 +1248,7 @@ var ValuesFunc = function.New(&function.Spec{ // ZipmapFunc is a function that constructs a map from a list of keys // and a corresponding list of values. var ZipmapFunc = function.New(&function.Spec{ + Description: `Constructs a map from a list of keys and a corresponding list of values, which must both be of the same length.`, Params: []function.Parameter{ { Name: "keys", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go index 66eb97e2..f61b5340 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go @@ -1,6 +1,7 @@ package stdlib import ( + "fmt" "strconv" "github.com/zclconf/go-cty/cty" @@ -18,6 +19,7 @@ import ( // a tuple. 
func MakeToFunc(wantTy cty.Type) function.Function { return function.New(&function.Spec{ + Description: fmt.Sprintf("Converts the given value to %s, or raises an error if that conversion is impossible.", wantTy.FriendlyName()), Params: []function.Parameter{ { Name: "v", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go index 339d04db..20d82bcd 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -11,6 +11,7 @@ import ( ) var CSVDecodeFunc = function.New(&function.Spec{ + Description: `Parses the given string as Comma Separated Values (as defined by RFC 4180) and returns a map of objects representing the table of data, using the first row as a header row to define the object attributes.`, Params: []function.Parameter{ { Name: "str", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go index 1ceffcf6..6c0ee05e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -12,6 +12,7 @@ import ( ) var FormatDateFunc = function.New(&function.Spec{ + Description: `Formats a timestamp given in RFC 3339 syntax into another timestamp in some other machine-oriented time syntax, as described in the format string.`, Params: []function.Parameter{ { Name: "format", @@ -205,6 +206,7 @@ var FormatDateFunc = function.New(&function.Spec{ // TimeAddFunc is a function that adds a duration to a timestamp, returning a new timestamp. var TimeAddFunc = function.New(&function.Spec{ + Description: `Adds the duration represented by the given duration string to the given RFC 3339 timestamp string, returning another RFC 3339 timestamp.`, Params: []function.Parameter{ { Name: "timestamp", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go index 8b177589..ca163a87 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -18,6 +18,7 @@ import ( //go:generate gofmt -w format_fsm.go var FormatFunc = function.New(&function.Spec{ + Description: `Constructs a string by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\".`, Params: []function.Parameter{ { Name: "format", @@ -45,6 +46,7 @@ var FormatFunc = function.New(&function.Spec{ }) var FormatListFunc = function.New(&function.Spec{ + Description: `Constructs a list of strings by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\".`, Params: []function.Parameter{ { Name: "format", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go index 6b31f266..4f70fff9 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -9,6 +9,7 @@ import ( ) var EqualFunc = function.New(&function.Spec{ + Description: `Returns true if the two given values are equal, or false otherwise.`, Params: []function.Parameter{ { Name: "a", @@ -32,6 +33,7 @@ var EqualFunc = function.New(&function.Spec{ }) var NotEqualFunc = function.New(&function.Spec{ + Description: `Returns false if the two given 
values are equal, or true otherwise.`, Params: []function.Parameter{ { Name: "a", @@ -55,7 +57,8 @@ var NotEqualFunc = function.New(&function.Spec{ }) var CoalesceFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the first of the given arguments that isn't null, or raises an error if there are no non-null arguments.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "vals", Type: cty.DynamicPseudoType, diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go index 02770a65..63dd320e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -7,6 +7,7 @@ import ( ) var JSONEncodeFunc = function.New(&function.Spec{ + Description: `Returns a string containing a JSON representation of the given value.`, Params: []function.Parameter{ { Name: "val", @@ -39,6 +40,7 @@ var JSONEncodeFunc = function.New(&function.Spec{ }) var JSONDecodeFunc = function.New(&function.Spec{ + Description: `Parses the given string as JSON and returns a value corresponding to what the JSON document describes.`, Params: []function.Parameter{ { Name: "str", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go index 4effeb7b..ce737513 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -11,6 +11,7 @@ import ( ) var AbsoluteFunc = function.New(&function.Spec{ + Description: `If the given number is negative then returns its positive equivalent, or otherwise returns the given number unchanged.`, Params: []function.Parameter{ { Name: "num", @@ -26,6 +27,7 @@ var AbsoluteFunc = function.New(&function.Spec{ }) var AddFunc = function.New(&function.Spec{ + Description: `Returns the sum of the two given numbers.`, Params: []function.Parameter{ { Name: "a", @@ -59,6 +61,7 @@ var AddFunc = function.New(&function.Spec{ }) var SubtractFunc = function.New(&function.Spec{ + Description: `Returns the difference between the two given numbers.`, Params: []function.Parameter{ { Name: "a", @@ -92,6 +95,7 @@ var SubtractFunc = function.New(&function.Spec{ }) var MultiplyFunc = function.New(&function.Spec{ + Description: `Returns the product of the two given numbers.`, Params: []function.Parameter{ { Name: "a", @@ -126,6 +130,7 @@ var MultiplyFunc = function.New(&function.Spec{ }) var DivideFunc = function.New(&function.Spec{ + Description: `Divides the first given number by the second.`, Params: []function.Parameter{ { Name: "a", @@ -160,6 +165,7 @@ var DivideFunc = function.New(&function.Spec{ }) var ModuloFunc = function.New(&function.Spec{ + Description: `Divides the first given number by the second and then returns the remainder.`, Params: []function.Parameter{ { Name: "a", @@ -194,6 +200,7 @@ var ModuloFunc = function.New(&function.Spec{ }) var GreaterThanFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is less than the first.`, Params: []function.Parameter{ { Name: "a", @@ -215,6 +222,7 @@ var GreaterThanFunc = function.New(&function.Spec{ }) var GreaterThanOrEqualToFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is less than or equal to the first.`, Params: []function.Parameter{ { Name: "a", @@ -236,6 +244,7 @@ var
GreaterThanOrEqualToFunc = function.New(&function.Spec{ }) var LessThanFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is greater than the first.`, Params: []function.Parameter{ { Name: "a", @@ -257,6 +266,7 @@ var LessThanFunc = function.New(&function.Spec{ }) var LessThanOrEqualToFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is greater than or equal to the first.`, Params: []function.Parameter{ { Name: "a", @@ -278,6 +288,7 @@ var LessThanOrEqualToFunc = function.New(&function.Spec{ }) var NegateFunc = function.New(&function.Spec{ + Description: `Multiplies the given number by -1.`, Params: []function.Parameter{ { Name: "num", @@ -293,7 +304,8 @@ var NegateFunc = function.New(&function.Spec{ }) var MinFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the numerically smallest of all of the given numbers.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "numbers", Type: cty.Number, @@ -317,7 +329,8 @@ var MinFunc = function.New(&function.Spec{ }) var MaxFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the numerically greatest of all of the given numbers.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "numbers", Type: cty.Number, @@ -341,6 +354,7 @@ var MaxFunc = function.New(&function.Spec{ }) var IntFunc = function.New(&function.Spec{ + Description: `Discards any fractional portion of the given number.`, Params: []function.Parameter{ { Name: "num", @@ -363,6 +377,7 @@ var IntFunc = function.New(&function.Spec{ // CeilFunc is a function that returns the closest whole number greater // than or equal to the given value. var CeilFunc = function.New(&function.Spec{ + Description: `Returns the smallest whole number that is greater than or equal to the given value.`, Params: []function.Parameter{ { Name: "num", @@ -392,6 +407,7 @@ var CeilFunc = function.New(&function.Spec{ // FloorFunc is a function that returns the closest whole number lesser // than or equal to the given value. var FloorFunc = function.New(&function.Spec{ + Description: `Returns the greatest whole number that is less than or equal to the given value.`, Params: []function.Parameter{ { Name: "num", @@ -420,6 +436,7 @@ var FloorFunc = function.New(&function.Spec{ // LogFunc is a function that returns the logarithm of a given number in a given base. var LogFunc = function.New(&function.Spec{ + Description: `Returns the logarithm of the given number in the given base.`, Params: []function.Parameter{ { Name: "num", @@ -448,6 +465,7 @@ var LogFunc = function.New(&function.Spec{ // PowFunc is a function that returns the logarithm of a given number in a given base. var PowFunc = function.New(&function.Spec{ + Description: `Returns the given number raised to the given power (exponentiation).`, Params: []function.Parameter{ { Name: "num", @@ -477,6 +495,7 @@ var PowFunc = function.New(&function.Spec{ // SignumFunc is a function that determines the sign of a number, returning a // number between -1 and 1 to represent the sign..
var SignumFunc = function.New(&function.Spec{ + Description: `Returns 0 if the given number is zero, 1 if the given number is positive, or -1 if the given number is negative.`, Params: []function.Parameter{ { Name: "num", @@ -502,6 +521,7 @@ var SignumFunc = function.New(&function.Spec{ // ParseIntFunc is a function that parses a string argument and returns an integer of the specified base. var ParseIntFunc = function.New(&function.Spec{ + Description: `Parses the given string as a number of the given base, or raises an error if the string contains invalid characters.`, Params: []function.Parameter{ { Name: "number", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go index 2dd6348a..ab4257b6 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go @@ -10,6 +10,7 @@ import ( ) var RegexFunc = function.New(&function.Spec{ + Description: `Applies the given regular expression pattern to the given string and returns information about a single match, or raises an error if there is no match.`, Params: []function.Parameter{ { Name: "pattern", @@ -54,6 +55,7 @@ var RegexFunc = function.New(&function.Spec{ }) var RegexAllFunc = function.New(&function.Spec{ + Description: `Applies the given regular expression pattern to the given string and returns a list of information about all non-overlapping matches, or an empty list if there are no matches.`, Params: []function.Parameter{ { Name: "pattern", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go index 6a6f66b3..6b2d97b4 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -9,7 +9,8 @@ import ( ) var ConcatFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Concatenates together all of the given lists or tuples into a single sequence, preserving the input order.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "seqs", Type: cty.DynamicPseudoType, @@ -137,6 +138,7 @@ var ConcatFunc = function.New(&function.Spec{ }) var RangeFunc = function.New(&function.Spec{ + Description: `Returns a list of numbers spread evenly over a particular range.`, VarParam: &function.Parameter{ Name: "params", Type: cty.Number, diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go index 29c425ea..15f4c05e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go @@ -10,6 +10,7 @@ import ( ) var SetHasElementFunc = function.New(&function.Spec{ + Description: `Returns true if the given set contains the given element, or false otherwise.`, Params: []function.Parameter{ { Name: "set", @@ -29,6 +30,7 @@ var SetHasElementFunc = function.New(&function.Spec{ }) var SetUnionFunc = function.New(&function.Spec{ + Description: `Returns the union of all given sets.`, Params: []function.Parameter{ { Name: "first_set", @@ -48,6 +50,7 @@ var SetUnionFunc = function.New(&function.Spec{ }) var SetIntersectionFunc = function.New(&function.Spec{ + Description: `Returns the intersection of all given sets.`, Params: []function.Parameter{ { Name: "first_set", @@ -67,6 +70,7 @@ var SetIntersectionFunc = 
function.New(&function.Spec{ }) var SetSubtractFunc = function.New(&function.Spec{ + Description: `Returns the relative complement of the two given sets.`, Params: []function.Parameter{ { Name: "a", @@ -86,6 +90,7 @@ var SetSubtractFunc = function.New(&function.Spec{ }) var SetSymmetricDifferenceFunc = function.New(&function.Spec{ + Description: `Returns the symmetric difference of the two given sets.`, Params: []function.Parameter{ { Name: "first_set", diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go index 43182dd5..f340ef74 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -14,6 +14,7 @@ import ( ) var UpperFunc = function.New(&function.Spec{ + Description: "Returns the given string with all Unicode letters translated to their uppercase equivalents.", Params: []function.Parameter{ { Name: "str", @@ -30,6 +31,7 @@ var UpperFunc = function.New(&function.Spec{ }) var LowerFunc = function.New(&function.Spec{ + Description: "Returns the given string with all Unicode letters translated to their lowercase equivalents.", Params: []function.Parameter{ { Name: "str", @@ -46,6 +48,7 @@ var LowerFunc = function.New(&function.Spec{ }) var ReverseFunc = function.New(&function.Spec{ + Description: "Returns the given string with all of its Unicode characters in reverse order.", Params: []function.Parameter{ { Name: "str", @@ -73,6 +76,7 @@ var ReverseFunc = function.New(&function.Spec{ }) var StrlenFunc = function.New(&function.Spec{ + Description: "Returns the number of Unicode characters (technically: grapheme clusters) in the given string.", Params: []function.Parameter{ { Name: "str", @@ -97,19 +101,23 @@ var StrlenFunc = function.New(&function.Spec{ }) var SubstrFunc = function.New(&function.Spec{ + Description: "Extracts a substring from the given string.", Params: []function.Parameter{ { Name: "str", + Description: "The input string.", Type: cty.String, AllowDynamicType: true, }, { Name: "offset", + Description: "The starting offset in Unicode characters.", Type: cty.Number, AllowDynamicType: true, }, { Name: "length", + Description: "The maximum length of the result in Unicode characters.", Type: cty.Number, AllowDynamicType: true, }, @@ -197,15 +205,18 @@ var SubstrFunc = function.New(&function.Spec{ }) var JoinFunc = function.New(&function.Spec{ + Description: "Concatenates together the elements of all given lists with a delimiter, producing a single string.", Params: []function.Parameter{ { - Name: "separator", - Type: cty.String, + Name: "separator", + Description: "Delimiter to insert between the given strings.", + Type: cty.String, }, }, VarParam: &function.Parameter{ - Name: "lists", - Type: cty.List(cty.String), + Name: "lists", + Description: "One or more lists of strings to join.", + Type: cty.List(cty.String), }, Type: function.StaticReturnType(cty.String), Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { @@ -244,6 +255,7 @@ var JoinFunc = function.New(&function.Spec{ }) var SortFunc = function.New(&function.Spec{ + Description: "Applies a lexicographic sort to the elements of the given list.", Params: []function.Parameter{ { Name: "list", @@ -282,14 +294,17 @@ var SortFunc = function.New(&function.Spec{ }) var SplitFunc = function.New(&function.Spec{ + Description: "Produces a list of one or more strings by splitting the given string at all instances of a given separator 
substring.", Params: []function.Parameter{ { - Name: "separator", - Type: cty.String, + Name: "separator", + Description: "The substring that delimits the result strings.", + Type: cty.String, }, { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to split.", + Type: cty.String, }, }, Type: function.StaticReturnType(cty.List(cty.String)), @@ -311,6 +326,7 @@ var SplitFunc = function.New(&function.Spec{ // ChompFunc is a function that removes newline characters at the end of a // string. var ChompFunc = function.New(&function.Spec{ + Description: "Removes one or more newline characters from the end of the given string.", Params: []function.Parameter{ { Name: "str", @@ -327,14 +343,17 @@ var ChompFunc = function.New(&function.Spec{ // IndentFunc is a function that adds a given number of spaces to the // beginnings of all but the first line in a given multi-line string. var IndentFunc = function.New(&function.Spec{ + Description: "Adds a given number of spaces after each newline character in the given string.", Params: []function.Parameter{ { - Name: "spaces", - Type: cty.Number, + Name: "spaces", + Description: "Number of spaces to add after each newline character.", + Type: cty.Number, }, { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to transform.", + Type: cty.String, }, }, Type: function.StaticReturnType(cty.String), @@ -352,6 +371,7 @@ var IndentFunc = function.New(&function.Spec{ // TitleFunc is a function that converts the first letter of each word in the // given string to uppercase. var TitleFunc = function.New(&function.Spec{ + Description: "Replaces one letter after each non-letter and non-digit character with its uppercase equivalent.", Params: []function.Parameter{ { Name: "str", @@ -367,6 +387,7 @@ var TitleFunc = function.New(&function.Spec{ // TrimSpaceFunc is a function that removes any space characters from the start // and end of the given string. var TrimSpaceFunc = function.New(&function.Spec{ + Description: "Removes any consecutive space characters (as defined by Unicode) from the start and end of the given string.", Params: []function.Parameter{ { Name: "str", @@ -382,20 +403,26 @@ var TrimSpaceFunc = function.New(&function.Spec{ // TrimFunc is a function that removes the specified characters from the start // and end of the given string. var TrimFunc = function.New(&function.Spec{ + Description: "Removes consecutive sequences of characters in \"cutset\" from the start and end of the given string.", Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to trim.", + Type: cty.String, }, { - Name: "cutset", - Type: cty.String, + Name: "cutset", + Description: "A string containing all of the characters to trim. Each character is taken separately, so the order of characters is insignificant.", + Type: cty.String, }, }, Type: function.StaticReturnType(cty.String), Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() cutset := args[1].AsString() + // NOTE: This doesn't properly handle any character that is encoded + // with multiple sequential code units, such as letters with + // combining diacritics and emoji modifier sequences. return cty.StringVal(strings.Trim(str, cutset)), nil }, }) @@ -403,14 +430,17 @@ var TrimFunc = function.New(&function.Spec{ // TrimPrefixFunc is a function that removes the specified characters from the // start the given string. 
var TrimPrefixFunc = function.New(&function.Spec{ + Description: "Removes the given prefix from the start of the given string, if present.", Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to trim.", + Type: cty.String, }, { - Name: "prefix", - Type: cty.String, + Name: "prefix", + Description: "The prefix to remove, if present.", + Type: cty.String, }, }, Type: function.StaticReturnType(cty.String), @@ -424,14 +454,17 @@ var TrimPrefixFunc = function.New(&function.Spec{ // TrimSuffixFunc is a function that removes the specified characters from the // end of the given string. var TrimSuffixFunc = function.New(&function.Spec{ + Description: "Removes the given suffix from the end of the given string, if present.", Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to trim.", + Type: cty.String, }, { - Name: "suffix", - Type: cty.String, + Name: "suffix", + Description: "The suffix to remove, if present.", + Type: cty.String, }, }, Type: function.StaticReturnType(cty.String), diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go index f777ce5c..573083bc 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go @@ -12,18 +12,22 @@ import ( // substring, and replaces each occurence with a given replacement string. // The substr argument is a simple string. var ReplaceFunc = function.New(&function.Spec{ + Description: `Replaces all instances of the given substring in the given string with the given replacement string.`, Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: `The string to search within.`, + Type: cty.String, }, { - Name: "substr", - Type: cty.String, + Name: "substr", + Description: `The substring to search for.`, + Type: cty.String, }, { - Name: "replace", - Type: cty.String, + Name: "replace", + Description: `The new substring to replace substr with.`, + Type: cty.String, }, }, Type: function.StaticReturnType(cty.String), @@ -40,13 +44,14 @@ var ReplaceFunc = function.New(&function.Spec{ // given substring, and replaces each occurence with a given replacement // string. The substr argument must be a valid regular expression.
var RegexReplaceFunc = function.New(&function.Spec{ + Description: `Applies the given regular expression pattern to the given string and replaces all matches with the given replacement string.`, Params: []function.Parameter{ { Name: "str", Type: cty.String, }, { - Name: "substr", + Name: "pattern", Type: cty.String, }, { diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df3..d896edc9 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b436..11e31f42 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f..7a1616a5 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. 
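Reviewer note: the trace/doc.go text above describes how spans are started and sampled. A minimal usage sketch follows; only the go.opencensus.io/trace calls quoted in the diff are assumed, while the package layout, the run/doWork names, and the span label are illustrative.

    package main

    import (
        "context"

        "go.opencensus.io/trace"
    )

    func run(ctx context.Context) {
        // StartSpan creates a child span if ctx already carries one, otherwise a
        // new top-level span, and returns a derived context for callees.
        ctx, span := trace.StartSpan(ctx, "example.com/Run")
        defer span.End()

        doWork(ctx) // callees receive ctx so they can open child spans
    }

    func doWork(ctx context.Context) {}

    func main() {
        // Sample every trace; as the doc comment warns, avoid this in
        // high-traffic production services.
        trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
        run(context.Background())
    }
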
diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497..80095a5f 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf2..b8fc1e49 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e2541985..da488fc8 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/modules.txt b/vendor/modules.txt index 5b8e7c18..6a6bbdaa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,5 @@ +# cloud.google.com/go/storage v1.28.1 +## explicit; go 1.19 # github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 ## explicit; go 1.16 github.com/Azure/go-ansiterm @@ -11,7 +13,7 @@ github.com/Masterminds/goutils # github.com/Masterminds/semver/v3 v3.1.1 ## explicit; go 1.12 github.com/Masterminds/semver/v3 -# github.com/Masterminds/sprig/v3 v3.2.0 +# github.com/Masterminds/sprig/v3 v3.2.1 ## explicit; go 1.13 github.com/Masterminds/sprig/v3 # github.com/Microsoft/go-winio v0.5.2 @@ -99,7 +101,7 @@ github.com/containerd/containerd/sys ## explicit; go 1.15 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil -# github.com/containernetworking/plugins v1.1.1 +# github.com/containernetworking/plugins v1.2.0 ## explicit; go 1.17 github.com/containernetworking/plugins/pkg/ns # github.com/coreos/go-iptables v0.6.0 @@ -112,7 +114,7 @@ github.com/coreos/go-systemd/import1 github.com/coreos/go-systemd/internal/dlopen github.com/coreos/go-systemd/machine1 github.com/coreos/go-systemd/util -# github.com/coreos/go-systemd/v22 v22.3.2 +# github.com/coreos/go-systemd/v22 v22.5.0 ## explicit; go 1.12 github.com/coreos/go-systemd/v22/dbus # github.com/creack/pty v1.1.18 @@ -124,7 +126,7 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/docker/cli v20.10.21+incompatible +# github.com/docker/cli v23.0.1+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -220,6 +222,13 @@ github.com/golang/snappy # github.com/google/btree v1.0.0 ## explicit github.com/google/btree +# github.com/google/go-cmp v0.5.9 +## explicit; go 1.13 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/go-containerregistry v0.5.1 ## explicit; go 1.14 github.com/google/go-containerregistry/internal/and @@ -283,8 +292,8 @@ 
github.com/hashicorp/go-msgpack/codec # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.4.3 -## explicit; go 1.13 +# github.com/hashicorp/go-plugin v1.4.9 +## explicit; go 1.17 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin # github.com/hashicorp/go-retryablehttp v0.7.0 @@ -308,10 +317,10 @@ github.com/hashicorp/go-secure-stdlib/reloadutil # github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 ## explicit; go 1.16 github.com/hashicorp/go-secure-stdlib/strutil -# github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1 +# github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 ## explicit; go 1.16 github.com/hashicorp/go-secure-stdlib/tlsutil -# github.com/hashicorp/go-set v0.1.6 +# github.com/hashicorp/go-set v0.1.8 ## explicit; go 1.18 github.com/hashicorp/go-set # github.com/hashicorp/go-sockaddr v1.0.2 @@ -349,8 +358,8 @@ github.com/hashicorp/hcl/v2/json # github.com/hashicorp/memberlist v0.5.0 ## explicit; go 1.12 github.com/hashicorp/memberlist -# github.com/hashicorp/nomad v1.4.4 -## explicit; go 1.19 +# github.com/hashicorp/nomad v1.4.6 +## explicit; go 1.20 github.com/hashicorp/nomad/acl github.com/hashicorp/nomad/ci github.com/hashicorp/nomad/client/allocdir @@ -391,7 +400,6 @@ github.com/hashicorp/nomad/helper/discover github.com/hashicorp/nomad/helper/envoy github.com/hashicorp/nomad/helper/escapingfs github.com/hashicorp/nomad/helper/flatmap -github.com/hashicorp/nomad/helper/freeport github.com/hashicorp/nomad/helper/grpc-middleware/logging github.com/hashicorp/nomad/helper/ipaddr github.com/hashicorp/nomad/helper/mount @@ -402,6 +410,7 @@ github.com/hashicorp/nomad/helper/pluginutils/loader github.com/hashicorp/nomad/helper/pointer github.com/hashicorp/nomad/helper/stats github.com/hashicorp/nomad/helper/testlog +github.com/hashicorp/nomad/helper/tlsutil github.com/hashicorp/nomad/helper/users github.com/hashicorp/nomad/helper/uuid github.com/hashicorp/nomad/lib/cpuset @@ -439,13 +448,13 @@ github.com/hashicorp/raft-autopilot ## explicit; go 1.12 github.com/hashicorp/serf/coordinate github.com/hashicorp/serf/serf -# github.com/hashicorp/vault/api v1.8.1 -## explicit; go 1.17 +# github.com/hashicorp/vault/api v1.8.2 +## explicit; go 1.19 github.com/hashicorp/vault/api # github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 ## explicit; go 1.16 github.com/hashicorp/vault/api/auth/kubernetes -# github.com/hashicorp/vault/sdk v0.6.0 +# github.com/hashicorp/vault/sdk v0.6.1 ## explicit; go 1.19 github.com/hashicorp/vault/sdk/helper/certutil github.com/hashicorp/vault/sdk/helper/compressutil @@ -502,7 +511,7 @@ github.com/mattn/go-isatty # github.com/miekg/dns v1.1.50 ## explicit; go 1.14 github.com/miekg/dns -# github.com/mitchellh/cli v1.1.4 +# github.com/mitchellh/cli v1.1.5 ## explicit; go 1.11 github.com/mitchellh/cli # github.com/mitchellh/copystructure v1.2.0 @@ -548,8 +557,6 @@ github.com/mrunalp/fileutils # github.com/oklog/run v1.1.0 ## explicit; go 1.13 github.com/oklog/run -# github.com/onsi/gomega v1.17.0 -## explicit; go 1.16 # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -613,8 +620,8 @@ github.com/posener/complete/cmd/install # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat -# github.com/rogpeppe/go-internal v1.6.1 -## explicit; go 1.11 +# github.com/rogpeppe/go-internal v1.9.0 +## explicit; go 1.17 
github.com/rogpeppe/go-internal/fmtsort # github.com/ryanuber/go-glob v1.0.0 ## explicit @@ -625,7 +632,7 @@ github.com/sean-/seed # github.com/seccomp/libseccomp-golang v0.10.0 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/shirou/gopsutil/v3 v3.22.8 +# github.com/shirou/gopsutil/v3 v3.23.1 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/cpu github.com/shirou/gopsutil/v3/disk @@ -634,6 +641,15 @@ github.com/shirou/gopsutil/v3/internal/common github.com/shirou/gopsutil/v3/mem github.com/shirou/gopsutil/v3/net github.com/shirou/gopsutil/v3/process +# github.com/shoenig/test v0.6.1 +## explicit; go 1.18 +github.com/shoenig/test/interfaces +github.com/shoenig/test/internal/assertions +github.com/shoenig/test/internal/brokenfs +github.com/shoenig/test/internal/constraints +github.com/shoenig/test/must +github.com/shoenig/test/portal +github.com/shoenig/test/wait # github.com/shopspring/decimal v1.2.0 ## explicit; go 1.13 github.com/shopspring/decimal @@ -650,11 +666,11 @@ github.com/stretchr/testify/require # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit github.com/syndtr/gocapability/capability -# github.com/tklauser/go-sysconf v0.3.10 +# github.com/tklauser/go-sysconf v0.3.11 ## explicit; go 1.13 github.com/tklauser/go-sysconf -# github.com/tklauser/numcpus v0.4.0 -## explicit; go 1.11 +# github.com/tklauser/numcpus v0.6.0 +## explicit; go 1.13 github.com/tklauser/numcpus # github.com/vishvananda/netlink v1.2.1-beta.2 ## explicit; go 1.12 @@ -675,7 +691,7 @@ github.com/vmihailenco/tagparser/internal/parser # github.com/yusufpapurcu/wmi v1.2.2 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# github.com/zclconf/go-cty v1.11.0 +# github.com/zclconf/go-cty v1.12.1 ## explicit; go 1.18 github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert @@ -688,7 +704,7 @@ github.com/zclconf/go-cty/cty/set # go.etcd.io/bbolt v1.3.6 ## explicit; go 1.12 go.etcd.io/bbolt -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -766,6 +782,8 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal +# google.golang.org/api v0.114.0 +## explicit; go 1.19 # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine
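Reviewer note: the largest piece of this vendor refresh is github.com/zclconf/go-cty moving from v1.11.0 to v1.12.1, which adds the Description metadata seen throughout the stdlib hunks above. A minimal sketch of exercising one of the affected functions: the import paths come from the diff, Call is go-cty's standard way to invoke a Function value, and everything else is illustrative.

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function/stdlib"
    )

    func main() {
        // UpperFunc's Spec now carries a Description string, but invoking the
        // function is unchanged from v1.11.0.
        v, err := stdlib.UpperFunc.Call([]cty.Value{cty.StringVal("nomad")})
        if err != nil {
            panic(err)
        }
        fmt.Println(v.AsString()) // prints NOMAD
    }
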